Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c16
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c3
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c39
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c4
-rw-r--r--drivers/net/ethernet/aurora/Kconfig23
-rw-r--r--drivers/net/ethernet/aurora/Makefile2
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c1520
-rw-r--r--drivers/net/ethernet/aurora/nb8800.h316
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig9
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c689
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.h96
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c186
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h14
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c82
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h37
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c114
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h59
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c461
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c11
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h344
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c9
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h59
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c36
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h1
-rw-r--r--drivers/net/ethernet/broadcom/unimac.h68
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c3
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c13
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/common.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c54
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c53
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c64
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c49
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c13
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h6
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c3
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c52
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c152
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h15
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c16
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c156
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h5
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h17
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.c93
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpni.h9
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c61
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c64
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c549
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c101
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c56
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c165
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h14
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c27
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c330
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c333
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c31
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c215
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h54
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c204
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c180
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h8
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c393
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h17
-rw-r--r--drivers/net/ethernet/intel/e100.c92
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c7
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c13
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h26
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c949
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h169
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c752
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c382
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c617
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h174
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c656
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c153
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_txrx.c15
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c2
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile1
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h54
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c60
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_controlq.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c40
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c47
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c273
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fw_update.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c445
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.h87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c142
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c114
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c662
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c1059
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_status.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c175
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h158
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c72
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c78
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c48
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_dump.c2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ethtool.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c24
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.c18
-rw-r--r--drivers/net/ethernet/intel/igc/igc_phy.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_regs.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c57
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c42
-rw-r--r--drivers/net/ethernet/marvell/Kconfig1
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c80
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2.h134
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c574
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c79
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c573
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.h25
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h71
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h131
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c59
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h163
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c12
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c272
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h57
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c166
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h84
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c214
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c261
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c130
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c430
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c652
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h27
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c139
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c54
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h32
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h614
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c181
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c237
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h135
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c551
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c155
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c78
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c52
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_switchdev.c93
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c43
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c216
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c984
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c289
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h175
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c499
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c1653
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c457
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.h37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c60
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c488
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c1107
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c517
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h76
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c71
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h94
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c678
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c431
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.c85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qos.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c275
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h55
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c556
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c233
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h100
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c189
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c588
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c118
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c1619
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h171
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c1640
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c1633
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h193
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h85
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h434
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c196
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c214
-rw-r--r--drivers/net/ethernet/micrel/Kconfig4
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c114
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c385
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h25
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile4
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c728
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c885
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c7
-rw-r--r--drivers/net/ethernet/mscc/ocelot_io.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c175
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c602
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.c19
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vcap.h295
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c331
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c15
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c14
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c83
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c49
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c22
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h10
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c134
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c12
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c35
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c196
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c456
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c1
-rw-r--r--drivers/net/ethernet/renesas/ravb.h37
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c6
-rw-r--r--drivers/net/ethernet/rocker/rocker.h6
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c63
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c45
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c4
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_channels.c6
-rw-r--r--drivers/net/ethernet/sfc/rx.c10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c10
-rw-r--r--drivers/net/ethernet/socionext/netsec.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c91
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c33
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c285
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c7
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h2
-rw-r--r--drivers/net/ethernet/ti/Kconfig10
-rw-r--r--drivers/net/ethernet/ti/Makefile1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c607
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h28
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.c538
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-switchdev.h34
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c7
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c12
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.h2
-rw-r--r--drivers/net/ethernet/ti/cpsw_switchdev.c91
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c8
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c18
-rw-r--r--drivers/net/ethernet/xilinx/Kconfig1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h29
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c94
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c3
404 files changed, 35794 insertions, 12303 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index de50e8b9e656..ad04660b97b8 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,7 +33,6 @@ source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
-source "drivers/net/ethernet/aurora/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/cadence/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index f8f38dcb5f8a..1e7dc8a7762d 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
-obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 06596fa1f9fe..102f2c91fdb8 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -404,6 +404,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
if (unlikely(!xdpf)) {
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = XDP_ABORTED;
break;
}
@@ -424,7 +425,10 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
xdp_stat = &rx_ring->rx_stats.xdp_redirect;
break;
}
- fallthrough;
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = XDP_ABORTED;
+ break;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
@@ -1585,10 +1589,9 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
int ret;
rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
- xdp->data = page_address(rx_info->page) + rx_info->page_offset;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_hard_start = page_address(rx_info->page);
- xdp->data_end = xdp->data + rx_ring->ena_bufs[0].len;
+ xdp_prepare_buff(xdp, page_address(rx_info->page),
+ rx_info->page_offset,
+ rx_ring->ena_bufs[0].len, false);
/* If for some reason we received a bigger packet than
* we expect, then we simply drop it
*/
@@ -1634,8 +1637,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid);
res_budget = budget;
- xdp.rxq = &rx_ring->xdp_rxq;
- xdp.frame_sz = ENA_PAGE_SIZE;
+ xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
do {
xdp_verdict = XDP_PASS;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b40d4377cc71..b2cd3bdba9f8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1279,10 +1279,18 @@
#define MDIO_PMA_10GBR_FECCTRL 0x00ab
#endif
+#ifndef MDIO_PMA_RX_CTRL1
+#define MDIO_PMA_RX_CTRL1 0x8051
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
+#ifndef MDIO_PCS_DIGITAL_STAT
+#define MDIO_PCS_DIGITAL_STAT 0x8010
+#endif
+
#ifndef MDIO_AN_XNP
#define MDIO_AN_XNP 0x0016
#endif
@@ -1358,6 +1366,8 @@
#define XGBE_KR_TRAINING_ENABLE BIT(1)
#define XGBE_PCS_CL37_BP BIT(12)
+#define XGBE_PCS_PSEQ_STATE_MASK 0x1c
+#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10
#define XGBE_AN_CL37_INT_CMPLT BIT(0)
#define XGBE_AN_CL37_INT_MASK 0x01
@@ -1375,6 +1385,10 @@
#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
#define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+#define XGBE_PMA_RX_RST_0_MASK BIT(4)
+#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
+#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 2709a2db5657..4f714f874c4f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1368,6 +1368,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
return;
netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(pdata->netdev);
xgbe_stop_timers(pdata);
flush_workqueue(pdata->dev_workqueue);
@@ -2295,8 +2296,6 @@ static const struct net_device_ops xgbe_netdev_ops = {
.ndo_setup_tc = xgbe_setup_tc,
.ndo_fix_features = xgbe_fix_features,
.ndo_set_features = xgbe_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = xgbe_features_check,
};
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 93ef5a30cb8d..4e97b4869522 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
&an_restart);
if (an_restart) {
xgbe_phy_config_aneg(pdata);
- return;
+ goto adjust_link;
}
if (pdata->phy.link) {
@@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
pdata->phy_if.phy_impl.stop(pdata);
pdata->phy.link = 0;
- netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 859ded0c06b0..18e48b3bc402 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -922,6 +922,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
if ((phy_id & 0xfffffff0) != 0x03625d10)
return false;
+ /* Reset PHY - wait for self-clearing reset bit to clear */
+ genphy_soft_reset(phy_data->phydev);
+
/* Disable RGMII mode */
phy_write(phy_data->phydev, 0x18, 0x7007);
reg = phy_read(phy_data->phydev, 0x18);
@@ -1953,6 +1956,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
+ XGBE_PCS_PSEQ_STATE_MASK);
+ if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
+ /* Mailbox command timed out, reset of RX block is required.
+ * This can be done by asserting the reset bit and waiting for
+ * its completion.
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
+ ndelay(20);
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
+ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
+ usleep_range(40, 50);
+ netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
+ }
+}
+
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int cmd, unsigned int sub_cmd)
{
@@ -1960,9 +1984,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int wait;
/* Log if a previous command did not complete */
- if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
netif_dbg(pdata, link, pdata->netdev,
"firmware mailbox not ready for command\n");
+ xgbe_phy_rx_reset(pdata);
+ }
/* Construct the command */
XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
@@ -1984,6 +2010,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
netif_dbg(pdata, link, pdata->netdev,
"firmware mailbox command did not complete\n");
+
+ /* Reset on error */
+ xgbe_phy_rx_reset(pdata);
}
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
@@ -2584,6 +2613,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
if (reg & MDIO_STAT1_LSTATUS)
return 1;
+ if (pdata->phy.autoneg == AUTONEG_ENABLE &&
+ phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
+ if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
+ netif_carrier_off(pdata->netdev);
+ *an_restart = 1;
+ }
+ }
+
/* No link, attempt a receiver reset cycle */
if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
phy_data->rrc_count = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 8f70a3909929..4af0cd9530de 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -71,8 +71,10 @@ static int aq_ndev_open(struct net_device *ndev)
goto err_exit;
err = aq_nic_start(aq_nic);
- if (err < 0)
+ if (err < 0) {
+ aq_nic_stop(aq_nic);
goto err_exit;
+ }
err_exit:
if (err < 0)
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
deleted file mode 100644
index 9ee30ea90bfa..000000000000
--- a/drivers/net/ethernet/aurora/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config NET_VENDOR_AURORA
- bool "Aurora VLSI devices"
- default y
- help
- If you have a network (Ethernet) device belonging to this class,
- say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- questions about Aurora devices. If you say Y, you will be asked
- for your specific device in the following questions.
-
-if NET_VENDOR_AURORA
-
-config AURORA_NB8800
- tristate "Aurora AU-NB8800 support"
- depends on HAS_DMA
- select PHYLIB
- help
- Support for the AU-NB8800 gigabit Ethernet controller.
-
-endif
diff --git a/drivers/net/ethernet/aurora/Makefile b/drivers/net/ethernet/aurora/Makefile
deleted file mode 100644
index f3d599867619..000000000000
--- a/drivers/net/ethernet/aurora/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_AURORA_NB8800) += nb8800.o
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
deleted file mode 100644
index 5b20185cbd62..000000000000
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ /dev/null
@@ -1,1520 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015 Mans Rullgard <mans@mansr.com>
- *
- * Mostly rewritten, based on driver from Sigma Designs. Original
- * copyright notice below.
- *
- * Driver for tangox SMP864x/SMP865x/SMP867x/SMP868x builtin Ethernet Mac.
- *
- * Copyright (C) 2005 Maxime Bizon <mbizon@freebox.fr>
- */
-
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_mdio.h>
-#include <linux/of_net.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/cache.h>
-#include <linux/jiffies.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <asm/barrier.h>
-
-#include "nb8800.h"
-
-static void nb8800_tx_done(struct net_device *dev);
-static int nb8800_dma_stop(struct net_device *dev);
-
-static inline u8 nb8800_readb(struct nb8800_priv *priv, int reg)
-{
- return readb_relaxed(priv->base + reg);
-}
-
-static inline u32 nb8800_readl(struct nb8800_priv *priv, int reg)
-{
- return readl_relaxed(priv->base + reg);
-}
-
-static inline void nb8800_writeb(struct nb8800_priv *priv, int reg, u8 val)
-{
- writeb_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_writew(struct nb8800_priv *priv, int reg, u16 val)
-{
- writew_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_writel(struct nb8800_priv *priv, int reg, u32 val)
-{
- writel_relaxed(val, priv->base + reg);
-}
-
-static inline void nb8800_maskb(struct nb8800_priv *priv, int reg,
- u32 mask, u32 val)
-{
- u32 old = nb8800_readb(priv, reg);
- u32 new = (old & ~mask) | (val & mask);
-
- if (new != old)
- nb8800_writeb(priv, reg, new);
-}
-
-static inline void nb8800_maskl(struct nb8800_priv *priv, int reg,
- u32 mask, u32 val)
-{
- u32 old = nb8800_readl(priv, reg);
- u32 new = (old & ~mask) | (val & mask);
-
- if (new != old)
- nb8800_writel(priv, reg, new);
-}
-
-static inline void nb8800_modb(struct nb8800_priv *priv, int reg, u8 bits,
- bool set)
-{
- nb8800_maskb(priv, reg, bits, set ? bits : 0);
-}
-
-static inline void nb8800_setb(struct nb8800_priv *priv, int reg, u8 bits)
-{
- nb8800_maskb(priv, reg, bits, bits);
-}
-
-static inline void nb8800_clearb(struct nb8800_priv *priv, int reg, u8 bits)
-{
- nb8800_maskb(priv, reg, bits, 0);
-}
-
-static inline void nb8800_modl(struct nb8800_priv *priv, int reg, u32 bits,
- bool set)
-{
- nb8800_maskl(priv, reg, bits, set ? bits : 0);
-}
-
-static inline void nb8800_setl(struct nb8800_priv *priv, int reg, u32 bits)
-{
- nb8800_maskl(priv, reg, bits, bits);
-}
-
-static inline void nb8800_clearl(struct nb8800_priv *priv, int reg, u32 bits)
-{
- nb8800_maskl(priv, reg, bits, 0);
-}
-
-static int nb8800_mdio_wait(struct mii_bus *bus)
-{
- struct nb8800_priv *priv = bus->priv;
- u32 val;
-
- return readl_poll_timeout_atomic(priv->base + NB8800_MDIO_CMD,
- val, !(val & MDIO_CMD_GO), 1, 1000);
-}
-
-static int nb8800_mdio_cmd(struct mii_bus *bus, u32 cmd)
-{
- struct nb8800_priv *priv = bus->priv;
- int err;
-
- err = nb8800_mdio_wait(bus);
- if (err)
- return err;
-
- nb8800_writel(priv, NB8800_MDIO_CMD, cmd);
- udelay(10);
- nb8800_writel(priv, NB8800_MDIO_CMD, cmd | MDIO_CMD_GO);
-
- return nb8800_mdio_wait(bus);
-}
-
-static int nb8800_mdio_read(struct mii_bus *bus, int phy_id, int reg)
-{
- struct nb8800_priv *priv = bus->priv;
- u32 val;
- int err;
-
- err = nb8800_mdio_cmd(bus, MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg));
- if (err)
- return err;
-
- val = nb8800_readl(priv, NB8800_MDIO_STS);
- if (val & MDIO_STS_ERR)
- return 0xffff;
-
- return val & 0xffff;
-}
-
-static int nb8800_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
-{
- u32 cmd = MDIO_CMD_ADDR(phy_id) | MDIO_CMD_REG(reg) |
- MDIO_CMD_DATA(val) | MDIO_CMD_WR;
-
- return nb8800_mdio_cmd(bus, cmd);
-}
-
-static void nb8800_mac_tx(struct net_device *dev, bool enable)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- while (nb8800_readl(priv, NB8800_TXC_CR) & TCR_EN)
- cpu_relax();
-
- nb8800_modb(priv, NB8800_TX_CTL1, TX_EN, enable);
-}
-
-static void nb8800_mac_rx(struct net_device *dev, bool enable)
-{
- nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_EN, enable);
-}
-
-static void nb8800_mac_af(struct net_device *dev, bool enable)
-{
- nb8800_modb(netdev_priv(dev), NB8800_RX_CTL, RX_AF_EN, enable);
-}
-
-static void nb8800_start_rx(struct net_device *dev)
-{
- nb8800_setl(netdev_priv(dev), NB8800_RXC_CR, RCR_EN);
-}
-
-static int nb8800_alloc_rx(struct net_device *dev, unsigned int i, bool napi)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
- struct nb8800_rx_buf *rxb = &priv->rx_bufs[i];
- int size = L1_CACHE_ALIGN(RX_BUF_SIZE);
- dma_addr_t dma_addr;
- struct page *page;
- unsigned long offset;
- void *data;
-
- data = napi ? napi_alloc_frag(size) : netdev_alloc_frag(size);
- if (!data)
- return -ENOMEM;
-
- page = virt_to_head_page(data);
- offset = data - page_address(page);
-
- dma_addr = dma_map_page(&dev->dev, page, offset, RX_BUF_SIZE,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(&dev->dev, dma_addr)) {
- skb_free_frag(data);
- return -ENOMEM;
- }
-
- rxb->page = page;
- rxb->offset = offset;
- rxd->desc.s_addr = dma_addr;
-
- return 0;
-}
-
-static void nb8800_receive(struct net_device *dev, unsigned int i,
- unsigned int len)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd = &priv->rx_descs[i];
- struct page *page = priv->rx_bufs[i].page;
- int offset = priv->rx_bufs[i].offset;
- void *data = page_address(page) + offset;
- dma_addr_t dma = rxd->desc.s_addr;
- struct sk_buff *skb;
- unsigned int size;
- int err;
-
- size = len <= RX_COPYBREAK ? len : RX_COPYHDR;
-
- skb = napi_alloc_skb(&priv->napi, size);
- if (!skb) {
- netdev_err(dev, "rx skb allocation failed\n");
- dev->stats.rx_dropped++;
- return;
- }
-
- if (len <= RX_COPYBREAK) {
- dma_sync_single_for_cpu(&dev->dev, dma, len, DMA_FROM_DEVICE);
- skb_put_data(skb, data, len);
- dma_sync_single_for_device(&dev->dev, dma, len,
- DMA_FROM_DEVICE);
- } else {
- err = nb8800_alloc_rx(dev, i, true);
- if (err) {
- netdev_err(dev, "rx buffer allocation failed\n");
- dev->stats.rx_dropped++;
- dev_kfree_skb(skb);
- return;
- }
-
- dma_unmap_page(&dev->dev, dma, RX_BUF_SIZE, DMA_FROM_DEVICE);
- skb_put_data(skb, data, RX_COPYHDR);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- offset + RX_COPYHDR, len - RX_COPYHDR,
- RX_BUF_SIZE);
- }
-
- skb->protocol = eth_type_trans(skb, dev);
- napi_gro_receive(&priv->napi, skb);
-}
-
-static void nb8800_rx_error(struct net_device *dev, u32 report)
-{
- if (report & RX_LENGTH_ERR)
- dev->stats.rx_length_errors++;
-
- if (report & RX_FCS_ERR)
- dev->stats.rx_crc_errors++;
-
- if (report & RX_FIFO_OVERRUN)
- dev->stats.rx_fifo_errors++;
-
- if (report & RX_ALIGNMENT_ERROR)
- dev->stats.rx_frame_errors++;
-
- dev->stats.rx_errors++;
-}
-
-static int nb8800_poll(struct napi_struct *napi, int budget)
-{
- struct net_device *dev = napi->dev;
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd;
- unsigned int last = priv->rx_eoc;
- unsigned int next;
- int work = 0;
-
- nb8800_tx_done(dev);
-
-again:
- do {
- unsigned int len;
-
- next = (last + 1) % RX_DESC_COUNT;
-
- rxd = &priv->rx_descs[next];
-
- if (!rxd->report)
- break;
-
- len = RX_BYTES_TRANSFERRED(rxd->report);
-
- if (IS_RX_ERROR(rxd->report))
- nb8800_rx_error(dev, rxd->report);
- else
- nb8800_receive(dev, next, len);
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
-
- if (rxd->report & RX_MULTICAST_PKT)
- dev->stats.multicast++;
-
- rxd->report = 0;
- last = next;
- work++;
- } while (work < budget);
-
- if (work) {
- priv->rx_descs[last].desc.config |= DESC_EOC;
- wmb(); /* ensure new EOC is written before clearing old */
- priv->rx_descs[priv->rx_eoc].desc.config &= ~DESC_EOC;
- priv->rx_eoc = last;
- nb8800_start_rx(dev);
- }
-
- if (work < budget) {
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
-
- /* If a packet arrived after we last checked but
- * before writing RX_ITR, the interrupt will be
- * delayed, so we retrieve it now.
- */
- if (priv->rx_descs[next].report)
- goto again;
-
- napi_complete_done(napi, work);
- }
-
- return work;
-}
-
-static void __nb8800_tx_dma_start(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_buf *txb;
- u32 txc_cr;
-
- txb = &priv->tx_bufs[priv->tx_queue];
- if (!txb->ready)
- return;
-
- txc_cr = nb8800_readl(priv, NB8800_TXC_CR);
- if (txc_cr & TCR_EN)
- return;
-
- nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
- wmb(); /* ensure desc addr is written before starting DMA */
- nb8800_writel(priv, NB8800_TXC_CR, txc_cr | TCR_EN);
-
- priv->tx_queue = (priv->tx_queue + txb->chain_len) % TX_DESC_COUNT;
-}
-
-static void nb8800_tx_dma_start(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- spin_lock_irq(&priv->tx_lock);
- __nb8800_tx_dma_start(dev);
- spin_unlock_irq(&priv->tx_lock);
-}
-
-static void nb8800_tx_dma_start_irq(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- spin_lock(&priv->tx_lock);
- __nb8800_tx_dma_start(dev);
- spin_unlock(&priv->tx_lock);
-}
-
-static netdev_tx_t nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_desc *txd;
- struct nb8800_tx_buf *txb;
- struct nb8800_dma_desc *desc;
- dma_addr_t dma_addr;
- unsigned int dma_len;
- unsigned int align;
- unsigned int next;
- bool xmit_more;
-
- if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
- netif_stop_queue(dev);
- return NETDEV_TX_BUSY;
- }
-
- align = (8 - (uintptr_t)skb->data) & 7;
-
- dma_len = skb->len - align;
- dma_addr = dma_map_single(&dev->dev, skb->data + align,
- dma_len, DMA_TO_DEVICE);
-
- if (dma_mapping_error(&dev->dev, dma_addr)) {
- netdev_err(dev, "tx dma mapping error\n");
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return NETDEV_TX_OK;
- }
-
- xmit_more = netdev_xmit_more();
- if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
- netif_stop_queue(dev);
- xmit_more = false;
- }
-
- next = priv->tx_next;
- txb = &priv->tx_bufs[next];
- txd = &priv->tx_descs[next];
- desc = &txd->desc[0];
-
- next = (next + 1) % TX_DESC_COUNT;
-
- if (align) {
- memcpy(txd->buf, skb->data, align);
-
- desc->s_addr =
- txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
- desc->n_addr = txb->dma_desc + sizeof(txd->desc[0]);
- desc->config = DESC_BTS(2) | DESC_DS | align;
-
- desc++;
- }
-
- desc->s_addr = dma_addr;
- desc->n_addr = priv->tx_bufs[next].dma_desc;
- desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
-
- if (!xmit_more)
- desc->config |= DESC_EOC;
-
- txb->skb = skb;
- txb->dma_addr = dma_addr;
- txb->dma_len = dma_len;
-
- if (!priv->tx_chain) {
- txb->chain_len = 1;
- priv->tx_chain = txb;
- } else {
- priv->tx_chain->chain_len++;
- }
-
- netdev_sent_queue(dev, skb->len);
-
- priv->tx_next = next;
-
- if (!xmit_more) {
- smp_wmb();
- priv->tx_chain->ready = true;
- priv->tx_chain = NULL;
- nb8800_tx_dma_start(dev);
- }
-
- return NETDEV_TX_OK;
-}
-
-static void nb8800_tx_error(struct net_device *dev, u32 report)
-{
- if (report & TX_LATE_COLLISION)
- dev->stats.collisions++;
-
- if (report & TX_PACKET_DROPPED)
- dev->stats.tx_dropped++;
-
- if (report & TX_FIFO_UNDERRUN)
- dev->stats.tx_fifo_errors++;
-
- dev->stats.tx_errors++;
-}
-
-static void nb8800_tx_done(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int limit = priv->tx_next;
- unsigned int done = priv->tx_done;
- unsigned int packets = 0;
- unsigned int len = 0;
-
- while (done != limit) {
- struct nb8800_tx_desc *txd = &priv->tx_descs[done];
- struct nb8800_tx_buf *txb = &priv->tx_bufs[done];
- struct sk_buff *skb;
-
- if (!txd->report)
- break;
-
- skb = txb->skb;
- len += skb->len;
-
- dma_unmap_single(&dev->dev, txb->dma_addr, txb->dma_len,
- DMA_TO_DEVICE);
-
- if (IS_TX_ERROR(txd->report)) {
- nb8800_tx_error(dev, txd->report);
- kfree_skb(skb);
- } else {
- consume_skb(skb);
- }
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += TX_BYTES_TRANSFERRED(txd->report);
- dev->stats.collisions += TX_EARLY_COLLISIONS(txd->report);
-
- txb->skb = NULL;
- txb->ready = false;
- txd->report = 0;
-
- done = (done + 1) % TX_DESC_COUNT;
- packets++;
- }
-
- if (packets) {
- smp_mb__before_atomic();
- atomic_add(packets, &priv->tx_free);
- netdev_completed_queue(dev, packets, len);
- netif_wake_queue(dev);
- priv->tx_done = done;
- }
-}
-
-static irqreturn_t nb8800_irq(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct nb8800_priv *priv = netdev_priv(dev);
- irqreturn_t ret = IRQ_NONE;
- u32 val;
-
- /* tx interrupt */
- val = nb8800_readl(priv, NB8800_TXC_SR);
- if (val) {
- nb8800_writel(priv, NB8800_TXC_SR, val);
-
- if (val & TSR_DI)
- nb8800_tx_dma_start_irq(dev);
-
- if (val & TSR_TI)
- napi_schedule_irqoff(&priv->napi);
-
- if (unlikely(val & TSR_DE))
- netdev_err(dev, "TX DMA error\n");
-
- /* should never happen with automatic status retrieval */
- if (unlikely(val & TSR_TO))
- netdev_err(dev, "TX Status FIFO overflow\n");
-
- ret = IRQ_HANDLED;
- }
-
- /* rx interrupt */
- val = nb8800_readl(priv, NB8800_RXC_SR);
- if (val) {
- nb8800_writel(priv, NB8800_RXC_SR, val);
-
- if (likely(val & (RSR_RI | RSR_DI))) {
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_poll);
- napi_schedule_irqoff(&priv->napi);
- }
-
- if (unlikely(val & RSR_DE))
- netdev_err(dev, "RX DMA error\n");
-
- /* should never happen with automatic status retrieval */
- if (unlikely(val & RSR_RO))
- netdev_err(dev, "RX Status FIFO overflow\n");
-
- ret = IRQ_HANDLED;
- }
-
- return ret;
-}
-
-static void nb8800_mac_config(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- bool gigabit = priv->speed == SPEED_1000;
- u32 mac_mode_mask = RGMII_MODE | HALF_DUPLEX | GMAC_MODE;
- u32 mac_mode = 0;
- u32 slot_time;
- u32 phy_clk;
- u32 ict;
-
- if (!priv->duplex)
- mac_mode |= HALF_DUPLEX;
-
- if (gigabit) {
- if (phy_interface_is_rgmii(dev->phydev))
- mac_mode |= RGMII_MODE;
-
- mac_mode |= GMAC_MODE;
- phy_clk = 125000000;
-
- /* Should be 512 but register is only 8 bits */
- slot_time = 255;
- } else {
- phy_clk = 25000000;
- slot_time = 128;
- }
-
- ict = DIV_ROUND_UP(phy_clk, clk_get_rate(priv->clk));
-
- nb8800_writeb(priv, NB8800_IC_THRESHOLD, ict);
- nb8800_writeb(priv, NB8800_SLOT_TIME, slot_time);
- nb8800_maskb(priv, NB8800_MAC_MODE, mac_mode_mask, mac_mode);
-}
-
-static void nb8800_pause_config(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- u32 rxcr;
-
- if (priv->pause_aneg) {
- if (!phydev || !phydev->link)
- return;
-
- priv->pause_rx = phydev->pause;
- priv->pause_tx = phydev->pause ^ phydev->asym_pause;
- }
-
- nb8800_modb(priv, NB8800_RX_CTL, RX_PAUSE_EN, priv->pause_rx);
-
- rxcr = nb8800_readl(priv, NB8800_RXC_CR);
- if (!!(rxcr & RCR_FL) == priv->pause_tx)
- return;
-
- if (netif_running(dev)) {
- napi_disable(&priv->napi);
- netif_tx_lock_bh(dev);
- nb8800_dma_stop(dev);
- nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
- nb8800_start_rx(dev);
- netif_tx_unlock_bh(dev);
- napi_enable(&priv->napi);
- } else {
- nb8800_modl(priv, NB8800_RXC_CR, RCR_FL, priv->pause_tx);
- }
-}
-
-static void nb8800_link_reconfigure(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int change = 0;
-
- if (phydev->link) {
- if (phydev->speed != priv->speed) {
- priv->speed = phydev->speed;
- change = 1;
- }
-
- if (phydev->duplex != priv->duplex) {
- priv->duplex = phydev->duplex;
- change = 1;
- }
-
- if (change)
- nb8800_mac_config(dev);
-
- nb8800_pause_config(dev);
- }
-
- if (phydev->link != priv->link) {
- priv->link = phydev->link;
- change = 1;
- }
-
- if (change)
- phy_print_status(phydev);
-}
-
-static void nb8800_update_mac_addr(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int i;
-
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_SRC_ADDR(i), dev->dev_addr[i]);
-
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_UC_ADDR(i), dev->dev_addr[i]);
-}
-
-static int nb8800_set_mac_address(struct net_device *dev, void *addr)
-{
- struct sockaddr *sock = addr;
-
- if (netif_running(dev))
- return -EBUSY;
-
- ether_addr_copy(dev->dev_addr, sock->sa_data);
- nb8800_update_mac_addr(dev);
-
- return 0;
-}
-
-static void nb8800_mc_init(struct net_device *dev, int val)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- nb8800_writeb(priv, NB8800_MC_INIT, val);
- readb_poll_timeout_atomic(priv->base + NB8800_MC_INIT, val, !val,
- 1, 1000);
-}
-
-static void nb8800_set_rx_mode(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct netdev_hw_addr *ha;
- int i;
-
- if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
- nb8800_mac_af(dev, false);
- return;
- }
-
- nb8800_mac_af(dev, true);
- nb8800_mc_init(dev, 0);
-
- netdev_for_each_mc_addr(ha, dev) {
- for (i = 0; i < ETH_ALEN; i++)
- nb8800_writeb(priv, NB8800_MC_ADDR(i), ha->addr[i]);
-
- nb8800_mc_init(dev, 0xff);
- }
-}
-
-#define RX_DESC_SIZE (RX_DESC_COUNT * sizeof(struct nb8800_rx_desc))
-#define TX_DESC_SIZE (TX_DESC_COUNT * sizeof(struct nb8800_tx_desc))
-
-static void nb8800_dma_free(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int i;
-
- if (priv->rx_bufs) {
- for (i = 0; i < RX_DESC_COUNT; i++)
- if (priv->rx_bufs[i].page)
- put_page(priv->rx_bufs[i].page);
-
- kfree(priv->rx_bufs);
- priv->rx_bufs = NULL;
- }
-
- if (priv->tx_bufs) {
- for (i = 0; i < TX_DESC_COUNT; i++)
- kfree_skb(priv->tx_bufs[i].skb);
-
- kfree(priv->tx_bufs);
- priv->tx_bufs = NULL;
- }
-
- if (priv->rx_descs) {
- dma_free_coherent(dev->dev.parent, RX_DESC_SIZE, priv->rx_descs,
- priv->rx_desc_dma);
- priv->rx_descs = NULL;
- }
-
- if (priv->tx_descs) {
- dma_free_coherent(dev->dev.parent, TX_DESC_SIZE, priv->tx_descs,
- priv->tx_desc_dma);
- priv->tx_descs = NULL;
- }
-}
-
-static void nb8800_dma_reset(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_rx_desc *rxd;
- struct nb8800_tx_desc *txd;
- unsigned int i;
-
- for (i = 0; i < RX_DESC_COUNT; i++) {
- dma_addr_t rx_dma = priv->rx_desc_dma + i * sizeof(*rxd);
-
- rxd = &priv->rx_descs[i];
- rxd->desc.n_addr = rx_dma + sizeof(*rxd);
- rxd->desc.r_addr =
- rx_dma + offsetof(struct nb8800_rx_desc, report);
- rxd->desc.config = priv->rx_dma_config;
- rxd->report = 0;
- }
-
- rxd->desc.n_addr = priv->rx_desc_dma;
- rxd->desc.config |= DESC_EOC;
-
- priv->rx_eoc = RX_DESC_COUNT - 1;
-
- for (i = 0; i < TX_DESC_COUNT; i++) {
- struct nb8800_tx_buf *txb = &priv->tx_bufs[i];
- dma_addr_t r_dma = txb->dma_desc +
- offsetof(struct nb8800_tx_desc, report);
-
- txd = &priv->tx_descs[i];
- txd->desc[0].r_addr = r_dma;
- txd->desc[1].r_addr = r_dma;
- txd->report = 0;
- }
-
- priv->tx_next = 0;
- priv->tx_queue = 0;
- priv->tx_done = 0;
- atomic_set(&priv->tx_free, TX_DESC_COUNT);
-
- nb8800_writel(priv, NB8800_RX_DESC_ADDR, priv->rx_desc_dma);
-
- wmb(); /* ensure all setup is written before starting */
-}
-
-static int nb8800_dma_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- unsigned int n_rx = RX_DESC_COUNT;
- unsigned int n_tx = TX_DESC_COUNT;
- unsigned int i;
- int err;
-
- priv->rx_descs = dma_alloc_coherent(dev->dev.parent, RX_DESC_SIZE,
- &priv->rx_desc_dma, GFP_KERNEL);
- if (!priv->rx_descs)
- goto err_out;
-
- priv->rx_bufs = kcalloc(n_rx, sizeof(*priv->rx_bufs), GFP_KERNEL);
- if (!priv->rx_bufs)
- goto err_out;
-
- for (i = 0; i < n_rx; i++) {
- err = nb8800_alloc_rx(dev, i, false);
- if (err)
- goto err_out;
- }
-
- priv->tx_descs = dma_alloc_coherent(dev->dev.parent, TX_DESC_SIZE,
- &priv->tx_desc_dma, GFP_KERNEL);
- if (!priv->tx_descs)
- goto err_out;
-
- priv->tx_bufs = kcalloc(n_tx, sizeof(*priv->tx_bufs), GFP_KERNEL);
- if (!priv->tx_bufs)
- goto err_out;
-
- for (i = 0; i < n_tx; i++)
- priv->tx_bufs[i].dma_desc =
- priv->tx_desc_dma + i * sizeof(struct nb8800_tx_desc);
-
- nb8800_dma_reset(dev);
-
- return 0;
-
-err_out:
- nb8800_dma_free(dev);
-
- return -ENOMEM;
-}
-
-static int nb8800_dma_stop(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct nb8800_tx_buf *txb = &priv->tx_bufs[0];
- struct nb8800_tx_desc *txd = &priv->tx_descs[0];
- int retry = 5;
- u32 txcr;
- u32 rxcr;
- int err;
- unsigned int i;
-
- /* wait for tx to finish */
- err = readl_poll_timeout_atomic(priv->base + NB8800_TXC_CR, txcr,
- !(txcr & TCR_EN) &&
- priv->tx_done == priv->tx_next,
- 1000, 1000000);
- if (err)
- return err;
-
- /* The rx DMA only stops if it reaches the end of chain.
- * To make this happen, we set the EOC flag on all rx
- * descriptors, put the device in loopback mode, and send
- * a few dummy frames. The interrupt handler will ignore
- * these since NAPI is disabled and no real frames are in
- * the tx queue.
- */
-
- for (i = 0; i < RX_DESC_COUNT; i++)
- priv->rx_descs[i].desc.config |= DESC_EOC;
-
- txd->desc[0].s_addr =
- txb->dma_desc + offsetof(struct nb8800_tx_desc, buf);
- txd->desc[0].config = DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8;
- memset(txd->buf, 0, sizeof(txd->buf));
-
- nb8800_mac_af(dev, false);
- nb8800_setb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
-
- do {
- nb8800_writel(priv, NB8800_TX_DESC_ADDR, txb->dma_desc);
- wmb();
- nb8800_writel(priv, NB8800_TXC_CR, txcr | TCR_EN);
-
- err = readl_poll_timeout_atomic(priv->base + NB8800_RXC_CR,
- rxcr, !(rxcr & RCR_EN),
- 1000, 100000);
- } while (err && --retry);
-
- nb8800_mac_af(dev, true);
- nb8800_clearb(priv, NB8800_MAC_MODE, LOOPBACK_EN);
- nb8800_dma_reset(dev);
-
- return retry ? 0 : -ETIMEDOUT;
-}
-
-static void nb8800_pause_adv(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- if (!phydev)
- return;
-
- phy_set_asym_pause(phydev, priv->pause_rx, priv->pause_tx);
-}
-
-static int nb8800_open(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev;
- int err;
-
- /* clear any pending interrupts */
- nb8800_writel(priv, NB8800_RXC_SR, 0xf);
- nb8800_writel(priv, NB8800_TXC_SR, 0xf);
-
- err = nb8800_dma_init(dev);
- if (err)
- return err;
-
- err = request_irq(dev->irq, nb8800_irq, 0, dev_name(&dev->dev), dev);
- if (err)
- goto err_free_dma;
-
- nb8800_mac_rx(dev, true);
- nb8800_mac_tx(dev, true);
-
- phydev = of_phy_connect(dev, priv->phy_node,
- nb8800_link_reconfigure, 0,
- priv->phy_mode);
- if (!phydev) {
- err = -ENODEV;
- goto err_free_irq;
- }
-
- nb8800_pause_adv(dev);
-
- netdev_reset_queue(dev);
- napi_enable(&priv->napi);
- netif_start_queue(dev);
-
- nb8800_start_rx(dev);
- phy_start(phydev);
-
- return 0;
-
-err_free_irq:
- free_irq(dev->irq, dev);
-err_free_dma:
- nb8800_dma_free(dev);
-
- return err;
-}
-
-static int nb8800_stop(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- phy_stop(phydev);
-
- netif_stop_queue(dev);
- napi_disable(&priv->napi);
-
- nb8800_dma_stop(dev);
- nb8800_mac_rx(dev, false);
- nb8800_mac_tx(dev, false);
-
- phy_disconnect(phydev);
-
- free_irq(dev->irq, dev);
-
- nb8800_dma_free(dev);
-
- return 0;
-}
-
-static const struct net_device_ops nb8800_netdev_ops = {
- .ndo_open = nb8800_open,
- .ndo_stop = nb8800_stop,
- .ndo_start_xmit = nb8800_xmit,
- .ndo_set_mac_address = nb8800_set_mac_address,
- .ndo_set_rx_mode = nb8800_set_rx_mode,
- .ndo_do_ioctl = phy_do_ioctl,
- .ndo_validate_addr = eth_validate_addr,
-};
-
-static void nb8800_get_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pp)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- pp->autoneg = priv->pause_aneg;
- pp->rx_pause = priv->pause_rx;
- pp->tx_pause = priv->pause_tx;
-}
-
-static int nb8800_set_pauseparam(struct net_device *dev,
- struct ethtool_pauseparam *pp)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
-
- priv->pause_aneg = pp->autoneg;
- priv->pause_rx = pp->rx_pause;
- priv->pause_tx = pp->tx_pause;
-
- nb8800_pause_adv(dev);
-
- if (!priv->pause_aneg)
- nb8800_pause_config(dev);
- else if (phydev)
- phy_start_aneg(phydev);
-
- return 0;
-}
-
-static const char nb8800_stats_names[][ETH_GSTRING_LEN] = {
- "rx_bytes_ok",
- "rx_frames_ok",
- "rx_undersize_frames",
- "rx_fragment_frames",
- "rx_64_byte_frames",
- "rx_127_byte_frames",
- "rx_255_byte_frames",
- "rx_511_byte_frames",
- "rx_1023_byte_frames",
- "rx_max_size_frames",
- "rx_oversize_frames",
- "rx_bad_fcs_frames",
- "rx_broadcast_frames",
- "rx_multicast_frames",
- "rx_control_frames",
- "rx_pause_frames",
- "rx_unsup_control_frames",
- "rx_align_error_frames",
- "rx_overrun_frames",
- "rx_jabber_frames",
- "rx_bytes",
- "rx_frames",
-
- "tx_bytes_ok",
- "tx_frames_ok",
- "tx_64_byte_frames",
- "tx_127_byte_frames",
- "tx_255_byte_frames",
- "tx_511_byte_frames",
- "tx_1023_byte_frames",
- "tx_max_size_frames",
- "tx_oversize_frames",
- "tx_broadcast_frames",
- "tx_multicast_frames",
- "tx_control_frames",
- "tx_pause_frames",
- "tx_underrun_frames",
- "tx_single_collision_frames",
- "tx_multi_collision_frames",
- "tx_deferred_collision_frames",
- "tx_late_collision_frames",
- "tx_excessive_collision_frames",
- "tx_bytes",
- "tx_frames",
- "tx_collisions",
-};
-
-#define NB8800_NUM_STATS ARRAY_SIZE(nb8800_stats_names)
-
-static int nb8800_get_sset_count(struct net_device *dev, int sset)
-{
- if (sset == ETH_SS_STATS)
- return NB8800_NUM_STATS;
-
- return -EOPNOTSUPP;
-}
-
-static void nb8800_get_strings(struct net_device *dev, u32 sset, u8 *buf)
-{
- if (sset == ETH_SS_STATS)
- memcpy(buf, &nb8800_stats_names, sizeof(nb8800_stats_names));
-}
-
-static u32 nb8800_read_stat(struct net_device *dev, int index)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- nb8800_writeb(priv, NB8800_STAT_INDEX, index);
-
- return nb8800_readl(priv, NB8800_STAT_DATA);
-}
-
-static void nb8800_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *estats, u64 *st)
-{
- unsigned int i;
- u32 rx, tx;
-
- for (i = 0; i < NB8800_NUM_STATS / 2; i++) {
- rx = nb8800_read_stat(dev, i);
- tx = nb8800_read_stat(dev, i | 0x80);
- st[i] = rx;
- st[i + NB8800_NUM_STATS / 2] = tx;
- }
-}
-
-static const struct ethtool_ops nb8800_ethtool_ops = {
- .nway_reset = phy_ethtool_nway_reset,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = nb8800_get_pauseparam,
- .set_pauseparam = nb8800_set_pauseparam,
- .get_sset_count = nb8800_get_sset_count,
- .get_strings = nb8800_get_strings,
- .get_ethtool_stats = nb8800_get_ethtool_stats,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static int nb8800_hw_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- u32 val;
-
- val = TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS;
- nb8800_writeb(priv, NB8800_TX_CTL1, val);
-
- /* Collision retry count */
- nb8800_writeb(priv, NB8800_TX_CTL2, 5);
-
- val = RX_PAD_STRIP | RX_AF_EN;
- nb8800_writeb(priv, NB8800_RX_CTL, val);
-
- /* Chosen by fair dice roll */
- nb8800_writeb(priv, NB8800_RANDOM_SEED, 4);
-
- /* TX cycles per deferral period */
- nb8800_writeb(priv, NB8800_TX_SDP, 12);
-
- /* The following three threshold values have been
- * experimentally determined for good results.
- */
-
- /* RX/TX FIFO threshold for partial empty (64-bit entries) */
- nb8800_writeb(priv, NB8800_PE_THRESHOLD, 0);
-
- /* RX/TX FIFO threshold for partial full (64-bit entries) */
- nb8800_writeb(priv, NB8800_PF_THRESHOLD, 255);
-
- /* Buffer size for transmit (64-bit entries) */
- nb8800_writeb(priv, NB8800_TX_BUFSIZE, 64);
-
- /* Configure tx DMA */
-
- val = nb8800_readl(priv, NB8800_TXC_CR);
- val &= TCR_LE; /* keep endian setting */
- val |= TCR_DM; /* DMA descriptor mode */
- val |= TCR_RS; /* automatically store tx status */
- val |= TCR_DIE; /* interrupt on DMA chain completion */
- val |= TCR_TFI(7); /* interrupt after 7 frames transmitted */
- val |= TCR_BTS(2); /* 32-byte bus transaction size */
- nb8800_writel(priv, NB8800_TXC_CR, val);
-
- /* TX complete interrupt after 10 ms or 7 frames (see above) */
- val = clk_get_rate(priv->clk) / 100;
- nb8800_writel(priv, NB8800_TX_ITR, val);
-
- /* Configure rx DMA */
-
- val = nb8800_readl(priv, NB8800_RXC_CR);
- val &= RCR_LE; /* keep endian setting */
- val |= RCR_DM; /* DMA descriptor mode */
- val |= RCR_RS; /* automatically store rx status */
- val |= RCR_DIE; /* interrupt at end of DMA chain */
- val |= RCR_RFI(7); /* interrupt after 7 frames received */
- val |= RCR_BTS(2); /* 32-byte bus transaction size */
- nb8800_writel(priv, NB8800_RXC_CR, val);
-
- /* The rx interrupt can fire before the DMA has completed
- * unless a small delay is added. 50 us is hopefully enough.
- */
- priv->rx_itr_irq = clk_get_rate(priv->clk) / 20000;
-
- /* In NAPI poll mode we want to disable interrupts, but the
- * hardware does not permit this. Delay 10 ms instead.
- */
- priv->rx_itr_poll = clk_get_rate(priv->clk) / 100;
-
- nb8800_writel(priv, NB8800_RX_ITR, priv->rx_itr_irq);
-
- priv->rx_dma_config = RX_BUF_SIZE | DESC_BTS(2) | DESC_DS | DESC_EOF;
-
- /* Flow control settings */
-
- /* Pause time of 0.1 ms */
- val = 100000 / 512;
- nb8800_writeb(priv, NB8800_PQ1, val >> 8);
- nb8800_writeb(priv, NB8800_PQ2, val & 0xff);
-
- /* Auto-negotiate by default */
- priv->pause_aneg = true;
- priv->pause_rx = true;
- priv->pause_tx = true;
-
- nb8800_mc_init(dev, 0);
-
- return 0;
-}
-
-static int nb8800_tangox_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- u32 pad_mode = PAD_MODE_MII;
-
- switch (priv->phy_mode) {
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_GMII:
- pad_mode = PAD_MODE_MII;
- break;
-
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- pad_mode = PAD_MODE_RGMII;
- break;
-
- default:
- dev_err(dev->dev.parent, "unsupported phy mode %s\n",
- phy_modes(priv->phy_mode));
- return -EINVAL;
- }
-
- nb8800_writeb(priv, NB8800_TANGOX_PAD_MODE, pad_mode);
-
- return 0;
-}
-
-static int nb8800_tangox_reset(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int clk_div;
-
- nb8800_writeb(priv, NB8800_TANGOX_RESET, 0);
- usleep_range(1000, 10000);
- nb8800_writeb(priv, NB8800_TANGOX_RESET, 1);
-
- wmb(); /* ensure reset is cleared before proceeding */
-
- clk_div = DIV_ROUND_UP(clk_get_rate(priv->clk), 2 * MAX_MDC_CLOCK);
- nb8800_writew(priv, NB8800_TANGOX_MDIO_CLKDIV, clk_div);
-
- return 0;
-}
-
-static const struct nb8800_ops nb8800_tangox_ops = {
- .init = nb8800_tangox_init,
- .reset = nb8800_tangox_reset,
-};
-
-static int nb8800_tango4_init(struct net_device *dev)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
- int err;
-
- err = nb8800_tangox_init(dev);
- if (err)
- return err;
-
- /* On tango4 interrupt on DMA completion per frame works and gives
- * better performance despite generating more rx interrupts.
- */
-
- /* Disable unnecessary interrupt on rx completion */
- nb8800_clearl(priv, NB8800_RXC_CR, RCR_RFI(7));
-
- /* Request interrupt on descriptor DMA completion */
- priv->rx_dma_config |= DESC_ID;
-
- return 0;
-}
-
-static const struct nb8800_ops nb8800_tango4_ops = {
- .init = nb8800_tango4_init,
- .reset = nb8800_tangox_reset,
-};
-
-static const struct of_device_id nb8800_dt_ids[] = {
- {
- .compatible = "aurora,nb8800",
- },
- {
- .compatible = "sigma,smp8642-ethernet",
- .data = &nb8800_tangox_ops,
- },
- {
- .compatible = "sigma,smp8734-ethernet",
- .data = &nb8800_tango4_ops,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, nb8800_dt_ids);
-
-static int nb8800_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- const struct nb8800_ops *ops = NULL;
- struct nb8800_priv *priv;
- struct resource *res;
- struct net_device *dev;
- struct mii_bus *bus;
- const unsigned char *mac;
- void __iomem *base;
- int irq;
- int ret;
-
- match = of_match_device(nb8800_dt_ids, &pdev->dev);
- if (match)
- ops = match->data;
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -EINVAL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- dev_dbg(&pdev->dev, "AU-NB8800 Ethernet at %pa\n", &res->start);
-
- dev = alloc_etherdev(sizeof(*priv));
- if (!dev)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, &pdev->dev);
-
- priv = netdev_priv(dev);
- priv->base = base;
-
- ret = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
- if (ret)
- priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- ret = PTR_ERR(priv->clk);
- goto err_free_dev;
- }
-
- ret = clk_prepare_enable(priv->clk);
- if (ret)
- goto err_free_dev;
-
- spin_lock_init(&priv->tx_lock);
-
- if (ops && ops->reset) {
- ret = ops->reset(dev);
- if (ret)
- goto err_disable_clk;
- }
-
- bus = devm_mdiobus_alloc(&pdev->dev);
- if (!bus) {
- ret = -ENOMEM;
- goto err_disable_clk;
- }
-
- bus->name = "nb8800-mii";
- bus->read = nb8800_mdio_read;
- bus->write = nb8800_mdio_write;
- bus->parent = &pdev->dev;
- snprintf(bus->id, MII_BUS_ID_SIZE, "%lx.nb8800-mii",
- (unsigned long)res->start);
- bus->priv = priv;
-
- ret = of_mdiobus_register(bus, pdev->dev.of_node);
- if (ret) {
- dev_err(&pdev->dev, "failed to register MII bus\n");
- goto err_disable_clk;
- }
-
- if (of_phy_is_fixed_link(pdev->dev.of_node)) {
- ret = of_phy_register_fixed_link(pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "bad fixed-link spec\n");
- goto err_free_bus;
- }
- priv->phy_node = of_node_get(pdev->dev.of_node);
- }
-
- if (!priv->phy_node)
- priv->phy_node = of_parse_phandle(pdev->dev.of_node,
- "phy-handle", 0);
-
- if (!priv->phy_node) {
- dev_err(&pdev->dev, "no PHY specified\n");
- ret = -ENODEV;
- goto err_free_bus;
- }
-
- priv->mii_bus = bus;
-
- ret = nb8800_hw_init(dev);
- if (ret)
- goto err_deregister_fixed_link;
-
- if (ops && ops->init) {
- ret = ops->init(dev);
- if (ret)
- goto err_deregister_fixed_link;
- }
-
- dev->netdev_ops = &nb8800_netdev_ops;
- dev->ethtool_ops = &nb8800_ethtool_ops;
- dev->flags |= IFF_MULTICAST;
- dev->irq = irq;
-
- mac = of_get_mac_address(pdev->dev.of_node);
- if (!IS_ERR(mac))
- ether_addr_copy(dev->dev_addr, mac);
-
- if (!is_valid_ether_addr(dev->dev_addr))
- eth_hw_addr_random(dev);
-
- nb8800_update_mac_addr(dev);
-
- netif_carrier_off(dev);
-
- ret = register_netdev(dev);
- if (ret) {
- netdev_err(dev, "failed to register netdev\n");
- goto err_free_dma;
- }
-
- netif_napi_add(dev, &priv->napi, nb8800_poll, NAPI_POLL_WEIGHT);
-
- netdev_info(dev, "MAC address %pM\n", dev->dev_addr);
-
- return 0;
-
-err_free_dma:
- nb8800_dma_free(dev);
-err_deregister_fixed_link:
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
-err_free_bus:
- of_node_put(priv->phy_node);
- mdiobus_unregister(bus);
-err_disable_clk:
- clk_disable_unprepare(priv->clk);
-err_free_dev:
- free_netdev(dev);
-
- return ret;
-}
-
-static int nb8800_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct nb8800_priv *priv = netdev_priv(ndev);
-
- unregister_netdev(ndev);
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
- of_node_put(priv->phy_node);
-
- mdiobus_unregister(priv->mii_bus);
-
- clk_disable_unprepare(priv->clk);
-
- nb8800_dma_free(ndev);
- free_netdev(ndev);
-
- return 0;
-}
-
-static struct platform_driver nb8800_driver = {
- .driver = {
- .name = "nb8800",
- .of_match_table = nb8800_dt_ids,
- },
- .probe = nb8800_probe,
- .remove = nb8800_remove,
-};
-
-module_platform_driver(nb8800_driver);
-
-MODULE_DESCRIPTION("Aurora AU-NB8800 Ethernet driver");
-MODULE_AUTHOR("Mans Rullgard <mans@mansr.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
deleted file mode 100644
index 40941fb6065b..000000000000
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _NB8800_H_
-#define _NB8800_H_
-
-#include <linux/types.h>
-#include <linux/skbuff.h>
-#include <linux/phy.h>
-#include <linux/clk.h>
-#include <linux/bitops.h>
-
-#define RX_DESC_COUNT 256
-#define TX_DESC_COUNT 256
-
-#define NB8800_DESC_LOW 4
-
-#define RX_BUF_SIZE 1552
-
-#define RX_COPYBREAK 256
-#define RX_COPYHDR 128
-
-#define MAX_MDC_CLOCK 2500000
-
-/* Stargate Solutions SSN8800 core registers */
-#define NB8800_TX_CTL1 0x000
-#define TX_TPD BIT(5)
-#define TX_APPEND_FCS BIT(4)
-#define TX_PAD_EN BIT(3)
-#define TX_RETRY_EN BIT(2)
-#define TX_EN BIT(0)
-
-#define NB8800_TX_CTL2 0x001
-
-#define NB8800_RX_CTL 0x004
-#define RX_BC_DISABLE BIT(7)
-#define RX_RUNT BIT(6)
-#define RX_AF_EN BIT(5)
-#define RX_PAUSE_EN BIT(3)
-#define RX_SEND_CRC BIT(2)
-#define RX_PAD_STRIP BIT(1)
-#define RX_EN BIT(0)
-
-#define NB8800_RANDOM_SEED 0x008
-#define NB8800_TX_SDP 0x14
-#define NB8800_TX_TPDP1 0x18
-#define NB8800_TX_TPDP2 0x19
-#define NB8800_SLOT_TIME 0x1c
-
-#define NB8800_MDIO_CMD 0x020
-#define MDIO_CMD_GO BIT(31)
-#define MDIO_CMD_WR BIT(26)
-#define MDIO_CMD_ADDR(x) ((x) << 21)
-#define MDIO_CMD_REG(x) ((x) << 16)
-#define MDIO_CMD_DATA(x) ((x) << 0)
-
-#define NB8800_MDIO_STS 0x024
-#define MDIO_STS_ERR BIT(31)
-
-#define NB8800_MC_ADDR(i) (0x028 + (i))
-#define NB8800_MC_INIT 0x02e
-#define NB8800_UC_ADDR(i) (0x03c + (i))
-
-#define NB8800_MAC_MODE 0x044
-#define RGMII_MODE BIT(7)
-#define HALF_DUPLEX BIT(4)
-#define BURST_EN BIT(3)
-#define LOOPBACK_EN BIT(2)
-#define GMAC_MODE BIT(0)
-
-#define NB8800_IC_THRESHOLD 0x050
-#define NB8800_PE_THRESHOLD 0x051
-#define NB8800_PF_THRESHOLD 0x052
-#define NB8800_TX_BUFSIZE 0x054
-#define NB8800_FIFO_CTL 0x056
-#define NB8800_PQ1 0x060
-#define NB8800_PQ2 0x061
-#define NB8800_SRC_ADDR(i) (0x06a + (i))
-#define NB8800_STAT_DATA 0x078
-#define NB8800_STAT_INDEX 0x07c
-#define NB8800_STAT_CLEAR 0x07d
-
-#define NB8800_SLEEP_MODE 0x07e
-#define SLEEP_MODE BIT(0)
-
-#define NB8800_WAKEUP 0x07f
-#define WAKEUP BIT(0)
-
-/* Aurora NB8800 host interface registers */
-#define NB8800_TXC_CR 0x100
-#define TCR_LK BIT(12)
-#define TCR_DS BIT(11)
-#define TCR_BTS(x) (((x) & 0x7) << 8)
-#define TCR_DIE BIT(7)
-#define TCR_TFI(x) (((x) & 0x7) << 4)
-#define TCR_LE BIT(3)
-#define TCR_RS BIT(2)
-#define TCR_DM BIT(1)
-#define TCR_EN BIT(0)
-
-#define NB8800_TXC_SR 0x104
-#define TSR_DE BIT(3)
-#define TSR_DI BIT(2)
-#define TSR_TO BIT(1)
-#define TSR_TI BIT(0)
-
-#define NB8800_TX_SAR 0x108
-#define NB8800_TX_DESC_ADDR 0x10c
-
-#define NB8800_TX_REPORT_ADDR 0x110
-#define TX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xffff)
-#define TX_FIRST_DEFERRAL BIT(7)
-#define TX_EARLY_COLLISIONS(x) (((x) >> 3) & 0xf)
-#define TX_LATE_COLLISION BIT(2)
-#define TX_PACKET_DROPPED BIT(1)
-#define TX_FIFO_UNDERRUN BIT(0)
-#define IS_TX_ERROR(r) ((r) & 0x07)
-
-#define NB8800_TX_FIFO_SR 0x114
-#define NB8800_TX_ITR 0x118
-
-#define NB8800_RXC_CR 0x200
-#define RCR_FL BIT(13)
-#define RCR_LK BIT(12)
-#define RCR_DS BIT(11)
-#define RCR_BTS(x) (((x) & 7) << 8)
-#define RCR_DIE BIT(7)
-#define RCR_RFI(x) (((x) & 7) << 4)
-#define RCR_LE BIT(3)
-#define RCR_RS BIT(2)
-#define RCR_DM BIT(1)
-#define RCR_EN BIT(0)
-
-#define NB8800_RXC_SR 0x204
-#define RSR_DE BIT(3)
-#define RSR_DI BIT(2)
-#define RSR_RO BIT(1)
-#define RSR_RI BIT(0)
-
-#define NB8800_RX_SAR 0x208
-#define NB8800_RX_DESC_ADDR 0x20c
-
-#define NB8800_RX_REPORT_ADDR 0x210
-#define RX_BYTES_TRANSFERRED(x) (((x) >> 16) & 0xFFFF)
-#define RX_MULTICAST_PKT BIT(9)
-#define RX_BROADCAST_PKT BIT(8)
-#define RX_LENGTH_ERR BIT(7)
-#define RX_FCS_ERR BIT(6)
-#define RX_RUNT_PKT BIT(5)
-#define RX_FIFO_OVERRUN BIT(4)
-#define RX_LATE_COLLISION BIT(3)
-#define RX_ALIGNMENT_ERROR BIT(2)
-#define RX_ERROR_MASK 0xfc
-#define IS_RX_ERROR(r) ((r) & RX_ERROR_MASK)
-
-#define NB8800_RX_FIFO_SR 0x214
-#define NB8800_RX_ITR 0x218
-
-/* Sigma Designs SMP86xx additional registers */
-#define NB8800_TANGOX_PAD_MODE 0x400
-#define PAD_MODE_MASK 0x7
-#define PAD_MODE_MII 0x0
-#define PAD_MODE_RGMII 0x1
-#define PAD_MODE_GTX_CLK_INV BIT(3)
-#define PAD_MODE_GTX_CLK_DELAY BIT(4)
-
-#define NB8800_TANGOX_MDIO_CLKDIV 0x420
-#define NB8800_TANGOX_RESET 0x424
-
-/* Hardware DMA descriptor */
-struct nb8800_dma_desc {
- u32 s_addr; /* start address */
- u32 n_addr; /* next descriptor address */
- u32 r_addr; /* report address */
- u32 config;
-} __aligned(8);
-
-#define DESC_ID BIT(23)
-#define DESC_EOC BIT(22)
-#define DESC_EOF BIT(21)
-#define DESC_LK BIT(20)
-#define DESC_DS BIT(19)
-#define DESC_BTS(x) (((x) & 0x7) << 16)
-
-/* DMA descriptor and associated data for rx.
- * Allocated from coherent memory.
- */
-struct nb8800_rx_desc {
- /* DMA descriptor */
- struct nb8800_dma_desc desc;
-
- /* Status report filled in by hardware */
- u32 report;
-};
-
-/* Address of buffer on rx ring */
-struct nb8800_rx_buf {
- struct page *page;
- unsigned long offset;
-};
-
-/* DMA descriptors and associated data for tx.
- * Allocated from coherent memory.
- */
-struct nb8800_tx_desc {
- /* DMA descriptor. The second descriptor is used if packet
- * data is unaligned.
- */
- struct nb8800_dma_desc desc[2];
-
- /* Status report filled in by hardware */
- u32 report;
-
- /* Bounce buffer for initial unaligned part of packet */
- u8 buf[8] __aligned(8);
-};
-
-/* Packet in tx queue */
-struct nb8800_tx_buf {
- /* Currently queued skb */
- struct sk_buff *skb;
-
- /* DMA address of the first descriptor */
- dma_addr_t dma_desc;
-
- /* DMA address of packet data */
- dma_addr_t dma_addr;
-
- /* Length of DMA mapping, less than skb->len if alignment
- * buffer is used.
- */
- unsigned int dma_len;
-
- /* Number of packets in chain starting here */
- unsigned int chain_len;
-
- /* Packet chain ready to be submitted to hardware */
- bool ready;
-};
-
-struct nb8800_priv {
- struct napi_struct napi;
-
- void __iomem *base;
-
- /* RX DMA descriptors */
- struct nb8800_rx_desc *rx_descs;
-
- /* RX buffers referenced by DMA descriptors */
- struct nb8800_rx_buf *rx_bufs;
-
- /* Current end of chain */
- u32 rx_eoc;
-
- /* Value for rx interrupt time register in NAPI interrupt mode */
- u32 rx_itr_irq;
-
- /* Value for rx interrupt time register in NAPI poll mode */
- u32 rx_itr_poll;
-
- /* Value for config field of rx DMA descriptors */
- u32 rx_dma_config;
-
- /* TX DMA descriptors */
- struct nb8800_tx_desc *tx_descs;
-
- /* TX packet queue */
- struct nb8800_tx_buf *tx_bufs;
-
- /* Number of free tx queue entries */
- atomic_t tx_free;
-
- /* First free tx queue entry */
- u32 tx_next;
-
- /* Next buffer to transmit */
- u32 tx_queue;
-
- /* Start of current packet chain */
- struct nb8800_tx_buf *tx_chain;
-
- /* Next buffer to reclaim */
- u32 tx_done;
-
- /* Lock for DMA activation */
- spinlock_t tx_lock;
-
- struct mii_bus *mii_bus;
- struct device_node *phy_node;
-
- /* PHY connection type from DT */
- phy_interface_t phy_mode;
-
- /* Current link status */
- int speed;
- int duplex;
- int link;
-
- /* Pause settings */
- bool pause_aneg;
- bool pause_rx;
- bool pause_tx;
-
- /* DMA base address of rx descriptors, see rx_descs above */
- dma_addr_t rx_desc_dma;
-
- /* DMA base address of tx descriptors, see tx_descs above */
- dma_addr_t tx_desc_dma;
-
- struct clk *clk;
-};
-
-struct nb8800_ops {
- int (*init)(struct net_device *dev);
- int (*reset)(struct net_device *dev);
-};
-
-#endif /* _NB8800_H_ */
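The descriptor config word defined in this header packs the transfer flags together with the buffer length in its low bits; nb8800_dma_stop() above, for example, builds DESC_BTS(2) | DESC_DS | DESC_EOF | DESC_EOC | 8 for an 8-byte dummy frame used to park the rx DMA. A hedged helper showing how such a value can be composed from the macros above; it is an illustration only, not code from the driver:

static u32 nb8800_desc_config(unsigned int len, bool end_of_frame, bool end_of_chain)
{
	/* 32-byte bus transactions, store the status report, length in low bits */
	u32 config = DESC_BTS(2) | DESC_DS | len;

	if (end_of_frame)
		config |= DESC_EOF;
	if (end_of_chain)
		config |= DESC_EOC;	/* DMA stops when it reaches this descriptor */

	return config;
}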
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 7b79528d6eed..f8a168b73307 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -51,6 +51,14 @@ config B44_PCI
depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
default y
+config BCM4908_ENET
+ tristate "Broadcom BCM4908 internal mac support"
+ depends on ARCH_BCM4908 || COMPILE_TEST
+ default y
+ help
+ This driver supports the Ethernet controller integrated into Broadcom
+ BCM4908 family SoCs.
+
config BCM63XX_ENET
tristate "Broadcom 63xx internal mac support"
depends on BCM63XX
@@ -174,7 +182,6 @@ config BGMAC_BCMA
config BGMAC_PLATFORM
tristate "Broadcom iProc GBit platform support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on OF
select BGMAC
select PHYLIB
select FIXED_PHY
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 7046ad6d3d0e..0ddfb5b5d53c 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM4908_ENET) += bcm4908_enet.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
new file mode 100644
index 000000000000..9be33dc98072
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -0,0 +1,689 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Rafał Miłecki <rafal@milecki.pl>
+ */
+
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "bcm4908_enet.h"
+#include "unimac.h"
+
+#define ENET_DMA_CH_RX_CFG ENET_DMA_CH0_CFG
+#define ENET_DMA_CH_TX_CFG ENET_DMA_CH1_CFG
+#define ENET_DMA_CH_RX_STATE_RAM ENET_DMA_CH0_STATE_RAM
+#define ENET_DMA_CH_TX_STATE_RAM ENET_DMA_CH1_STATE_RAM
+
+#define ENET_TX_BDS_NUM 200
+#define ENET_RX_BDS_NUM 200
+#define ENET_RX_BDS_NUM_MAX 8192
+
+#define ENET_DMA_INT_DEFAULTS (ENET_DMA_CH_CFG_INT_DONE | \
+ ENET_DMA_CH_CFG_INT_NO_DESC | \
+ ENET_DMA_CH_CFG_INT_BUFF_DONE)
+#define ENET_DMA_MAX_BURST_LEN 8 /* in 64 bit words */
+
+#define ENET_MTU_MAX ETH_DATA_LEN /* Is it possible to support 2044? */
+#define BRCM_MAX_TAG_LEN 6
+#define ENET_MAX_ETH_OVERHEAD (ETH_HLEN + BRCM_MAX_TAG_LEN + VLAN_HLEN + \
+ ETH_FCS_LEN + 4) /* 32 */
+
+struct bcm4908_enet_dma_ring_bd {
+ __le32 ctl;
+ __le32 addr;
+} __packed;
+
+struct bcm4908_enet_dma_ring_slot {
+ struct sk_buff *skb;
+ unsigned int len;
+ dma_addr_t dma_addr;
+};
+
+struct bcm4908_enet_dma_ring {
+ int is_tx;
+ int read_idx;
+ int write_idx;
+ int length;
+ u16 cfg_block;
+ u16 st_ram_block;
+
+ union {
+ void *cpu_addr;
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ };
+ dma_addr_t dma_addr;
+
+ struct bcm4908_enet_dma_ring_slot *slots;
+};
+
+struct bcm4908_enet {
+ struct device *dev;
+ struct net_device *netdev;
+ struct napi_struct napi;
+ void __iomem *base;
+
+ struct bcm4908_enet_dma_ring tx_ring;
+ struct bcm4908_enet_dma_ring rx_ring;
+};
+
+/***
+ * R/W ops
+ */
+
+static u32 enet_read(struct bcm4908_enet *enet, u16 offset)
+{
+ return readl(enet->base + offset);
+}
+
+static void enet_write(struct bcm4908_enet *enet, u16 offset, u32 value)
+{
+ writel(value, enet->base + offset);
+}
+
+static void enet_maskset(struct bcm4908_enet *enet, u16 offset, u32 mask, u32 set)
+{
+ u32 val;
+
+ WARN_ON(set & ~mask);
+
+ val = enet_read(enet, offset);
+ val = (val & ~mask) | (set & mask);
+ enet_write(enet, offset, val);
+}
+
+static void enet_set(struct bcm4908_enet *enet, u16 offset, u32 set)
+{
+ enet_maskset(enet, offset, set, set);
+}
+
+static u32 enet_umac_read(struct bcm4908_enet *enet, u16 offset)
+{
+ return enet_read(enet, ENET_UNIMAC + offset);
+}
+
+static void enet_umac_write(struct bcm4908_enet *enet, u16 offset, u32 value)
+{
+ enet_write(enet, ENET_UNIMAC + offset, value);
+}
+
+static void enet_umac_set(struct bcm4908_enet *enet, u16 offset, u32 set)
+{
+ enet_set(enet, ENET_UNIMAC + offset, set);
+}
+
+/***
+ * Helpers
+ */
+
+static void bcm4908_enet_intrs_on(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908_enet_intrs_off(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_MASK, 0);
+}
+
+static void bcm4908_enet_intrs_ack(struct bcm4908_enet *enet)
+{
+ enet_write(enet, ENET_DMA_CH_RX_CFG + ENET_DMA_CH_CFG_INT_STAT, ENET_DMA_INT_DEFAULTS);
+}
+
+static void bcm4908_enet_set_mtu(struct bcm4908_enet *enet, int mtu)
+{
+ enet_umac_write(enet, UMAC_MAX_FRAME_LEN, mtu + ENET_MAX_ETH_OVERHEAD);
+}
+
+/***
+ * DMA
+ */
+
+static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ int size = ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ struct device *dev = enet->dev;
+
+ ring->cpu_addr = dma_alloc_coherent(dev, size, &ring->dma_addr, GFP_KERNEL);
+ if (!ring->cpu_addr)
+ return -ENOMEM;
+
+ if (((uintptr_t)ring->cpu_addr) & (0x40 - 1)) {
+ dev_err(dev, "Invalid DMA ring alignment\n");
+ goto err_free_buf_descs;
+ }
+
+ ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+ if (!ring->slots)
+ goto err_free_buf_descs;
+
+ ring->read_idx = 0;
+ ring->write_idx = 0;
+
+ return 0;
+
+err_free_buf_descs:
+ dma_free_coherent(dev, size, ring->cpu_addr, ring->dma_addr);
+ return -ENOMEM;
+}
+
+static void bcm4908_enet_dma_free(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int size;
+
+ size = rx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ if (rx_ring->cpu_addr)
+ dma_free_coherent(dev, size, rx_ring->cpu_addr, rx_ring->dma_addr);
+ kfree(rx_ring->slots);
+
+ size = tx_ring->length * sizeof(struct bcm4908_enet_dma_ring_bd);
+ if (tx_ring->cpu_addr)
+ dma_free_coherent(dev, size, tx_ring->cpu_addr, tx_ring->dma_addr);
+ kfree(tx_ring->slots);
+}
+
+static int bcm4908_enet_dma_alloc(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *tx_ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int err;
+
+ tx_ring->length = ENET_TX_BDS_NUM;
+ tx_ring->is_tx = 1;
+ tx_ring->cfg_block = ENET_DMA_CH_TX_CFG;
+ tx_ring->st_ram_block = ENET_DMA_CH_TX_STATE_RAM;
+ err = bcm4908_dma_alloc_buf_descs(enet, tx_ring);
+ if (err) {
+ dev_err(dev, "Failed to alloc TX buf descriptors: %d\n", err);
+ return err;
+ }
+
+ rx_ring->length = ENET_RX_BDS_NUM;
+ rx_ring->is_tx = 0;
+ rx_ring->cfg_block = ENET_DMA_CH_RX_CFG;
+ rx_ring->st_ram_block = ENET_DMA_CH_RX_STATE_RAM;
+ err = bcm4908_dma_alloc_buf_descs(enet, rx_ring);
+ if (err) {
+ dev_err(dev, "Failed to alloc RX buf descriptors: %d\n", err);
+ bcm4908_enet_dma_free(enet);
+ return err;
+ }
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_reset(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rings[] = { &enet->rx_ring, &enet->tx_ring };
+ int i;
+
+ /* Disable the DMA controller and channel */
+ for (i = 0; i < ARRAY_SIZE(rings); i++)
+ enet_write(enet, rings[i]->cfg_block + ENET_DMA_CH_CFG, 0);
+ enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN, 0);
+
+ /* Reset channels state */
+ for (i = 0; i < ARRAY_SIZE(rings); i++) {
+ struct bcm4908_enet_dma_ring *ring = rings[i];
+
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_STATE_DATA, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS, 0);
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR, 0);
+ }
+}
+
+static int bcm4908_enet_dma_alloc_rx_buf(struct bcm4908_enet *enet, unsigned int idx)
+{
+ struct bcm4908_enet_dma_ring_bd *buf_desc = &enet->rx_ring.buf_desc[idx];
+ struct bcm4908_enet_dma_ring_slot *slot = &enet->rx_ring.slots[idx];
+ struct device *dev = enet->dev;
+ u32 tmp;
+ int err;
+
+ slot->len = ENET_MTU_MAX + ENET_MAX_ETH_OVERHEAD;
+
+ slot->skb = netdev_alloc_skb(enet->netdev, slot->len);
+ if (!slot->skb)
+ return -ENOMEM;
+
+ slot->dma_addr = dma_map_single(dev, slot->skb->data, slot->len, DMA_FROM_DEVICE);
+ err = dma_mapping_error(dev, slot->dma_addr);
+ if (err) {
+ dev_err(dev, "Failed to map DMA buffer: %d\n", err);
+ kfree_skb(slot->skb);
+ slot->skb = NULL;
+ return err;
+ }
+
+ tmp = slot->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp |= DMA_CTL_STATUS_OWN;
+ if (idx == enet->rx_ring.length - 1)
+ tmp |= DMA_CTL_STATUS_WRAP;
+ buf_desc->ctl = cpu_to_le32(tmp);
+ buf_desc->addr = cpu_to_le32(slot->dma_addr);
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_ring_init(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ int reset_channel = 0; /* We support only 1 main channel (with TX and RX) */
+ int reset_subch = ring->is_tx ? 1 : 0;
+
+ /* Reset the DMA channel */
+ enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, BIT(reset_channel * 2 + reset_subch));
+ enet_write(enet, ENET_DMA_CTRL_CHANNEL_RESET, 0);
+
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_MAX_BURST, ENET_DMA_MAX_BURST_LEN);
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG_INT_MASK, 0);
+
+ enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
+ (uint32_t)ring->dma_addr);
+}
+
+static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ int i;
+
+ for (i = rx_ring->length - 1; i >= 0; i--) {
+ slot = &rx_ring->slots[i];
+ if (!slot->skb)
+ continue;
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_FROM_DEVICE);
+ kfree_skb(slot->skb);
+ slot->skb = NULL;
+ }
+}
+
+static int bcm4908_enet_dma_init(struct bcm4908_enet *enet)
+{
+ struct bcm4908_enet_dma_ring *rx_ring = &enet->rx_ring;
+ struct device *dev = enet->dev;
+ int err;
+ int i;
+
+ for (i = 0; i < rx_ring->length; i++) {
+ err = bcm4908_enet_dma_alloc_rx_buf(enet, i);
+ if (err) {
+ dev_err(dev, "Failed to alloc RX buffer: %d\n", err);
+ bcm4908_enet_dma_uninit(enet);
+ return err;
+ }
+ }
+
+ bcm4908_enet_dma_ring_init(enet, &enet->tx_ring);
+ bcm4908_enet_dma_ring_init(enet, &enet->rx_ring);
+
+ return 0;
+}
+
+static void bcm4908_enet_dma_tx_ring_enable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908_enet_dma_tx_ring_disable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_write(enet, ring->cfg_block + ENET_DMA_CH_CFG, 0);
+}
+
+static void bcm4908_enet_dma_rx_ring_enable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ enet_set(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE);
+}
+
+static void bcm4908_enet_dma_rx_ring_disable(struct bcm4908_enet *enet,
+ struct bcm4908_enet_dma_ring *ring)
+{
+ unsigned long deadline;
+ u32 tmp;
+
+ enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+
+ deadline = jiffies + usecs_to_jiffies(2000);
+ do {
+ tmp = enet_read(enet, ring->cfg_block + ENET_DMA_CH_CFG);
+ if (!(tmp & ENET_DMA_CH_CFG_ENABLE))
+ return;
+ enet_maskset(enet, ring->cfg_block + ENET_DMA_CH_CFG, ENET_DMA_CH_CFG_ENABLE, 0);
+ usleep_range(10, 30);
+ } while (!time_after_eq(jiffies, deadline));
+
+ dev_warn(enet->dev, "Timeout waiting for DMA RX stop\n");
+}
+
+/***
+ * Ethernet driver
+ */
+
+static void bcm4908_enet_gmac_init(struct bcm4908_enet *enet)
+{
+ u32 cmd;
+
+ bcm4908_enet_set_mtu(enet, enet->netdev->mtu);
+
+ cmd = enet_umac_read(enet, UMAC_CMD);
+ enet_umac_write(enet, UMAC_CMD, cmd | CMD_SW_RESET);
+ enet_umac_write(enet, UMAC_CMD, cmd & ~CMD_SW_RESET);
+
+ enet_set(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH);
+ enet_maskset(enet, ENET_FLUSH, ENET_FLUSH_RXFIFO_FLUSH | ENET_FLUSH_TXFIFO_FLUSH, 0);
+
+ enet_set(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB);
+ enet_maskset(enet, ENET_MIB_CTRL, ENET_MIB_CTRL_CLR_MIB, 0);
+
+ cmd = enet_umac_read(enet, UMAC_CMD);
+ cmd &= ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT);
+ cmd &= ~CMD_TX_EN;
+ cmd &= ~CMD_RX_EN;
+ cmd |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
+ enet_umac_write(enet, UMAC_CMD, cmd);
+
+ enet_maskset(enet, ENET_GMAC_STATUS,
+ ENET_GMAC_STATUS_ETH_SPEED_MASK |
+ ENET_GMAC_STATUS_HD |
+ ENET_GMAC_STATUS_AUTO_CFG_EN |
+ ENET_GMAC_STATUS_LINK_UP,
+ ENET_GMAC_STATUS_ETH_SPEED_1000 |
+ ENET_GMAC_STATUS_AUTO_CFG_EN |
+ ENET_GMAC_STATUS_LINK_UP);
+}
+
+static irqreturn_t bcm4908_enet_irq_handler(int irq, void *dev_id)
+{
+ struct bcm4908_enet *enet = dev_id;
+
+ bcm4908_enet_intrs_off(enet);
+ bcm4908_enet_intrs_ack(enet);
+
+ napi_schedule(&enet->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm4908_enet_open(struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct device *dev = enet->dev;
+ int err;
+
+ err = request_irq(netdev->irq, bcm4908_enet_irq_handler, 0, "enet", enet);
+ if (err) {
+ dev_err(dev, "Failed to request IRQ %d: %d\n", netdev->irq, err);
+ return err;
+ }
+
+ bcm4908_enet_gmac_init(enet);
+ bcm4908_enet_dma_reset(enet);
+ bcm4908_enet_dma_init(enet);
+
+ enet_umac_set(enet, UMAC_CMD, CMD_TX_EN | CMD_RX_EN);
+
+ enet_set(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_MASTER_EN);
+ enet_maskset(enet, ENET_DMA_CONTROLLER_CFG, ENET_DMA_CTRL_CFG_FLOWC_CH1_EN, 0);
+ bcm4908_enet_dma_rx_ring_enable(enet, &enet->rx_ring);
+
+ napi_enable(&enet->napi);
+ netif_carrier_on(netdev);
+ netif_start_queue(netdev);
+
+ bcm4908_enet_intrs_ack(enet);
+ bcm4908_enet_intrs_on(enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_stop(struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+ netif_carrier_off(netdev);
+ napi_disable(&enet->napi);
+
+ bcm4908_enet_dma_rx_ring_disable(enet, &enet->rx_ring);
+ bcm4908_enet_dma_tx_ring_disable(enet, &enet->tx_ring);
+
+ bcm4908_enet_dma_uninit(enet);
+
+ free_irq(enet->netdev->irq, enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+ struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
+ struct bcm4908_enet_dma_ring_slot *slot;
+ struct device *dev = enet->dev;
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ int free_buf_descs;
+ u32 tmp;
+
+ /* Free transmitted skbs */
+ while (ring->read_idx != ring->write_idx) {
+ buf_desc = &ring->buf_desc[ring->read_idx];
+ if (le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)
+ break;
+ slot = &ring->slots[ring->read_idx];
+
+ dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE);
+ dev_kfree_skb(slot->skb);
+ if (++ring->read_idx == ring->length)
+ ring->read_idx = 0;
+ }
+
+ /* Don't use the last empty buf descriptor */
+ if (ring->read_idx <= ring->write_idx)
+ free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
+ else
+ free_buf_descs = ring->read_idx - ring->write_idx;
+ if (free_buf_descs < 2)
+ return NETDEV_TX_BUSY;
+
+ /* Hardware removes OWN bit after sending data */
+ buf_desc = &ring->buf_desc[ring->write_idx];
+ if (unlikely(le32_to_cpu(buf_desc->ctl) & DMA_CTL_STATUS_OWN)) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ slot = &ring->slots[ring->write_idx];
+ slot->skb = skb;
+ slot->len = skb->len;
+ slot->dma_addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, slot->dma_addr)))
+ return NETDEV_TX_BUSY;
+
+ tmp = skb->len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+ tmp |= DMA_CTL_STATUS_OWN;
+ tmp |= DMA_CTL_STATUS_SOP;
+ tmp |= DMA_CTL_STATUS_EOP;
+ tmp |= DMA_CTL_STATUS_APPEND_CRC;
+ if (ring->write_idx + 1 == ring->length - 1)
+ tmp |= DMA_CTL_STATUS_WRAP;
+
+ buf_desc->addr = cpu_to_le32((uint32_t)slot->dma_addr);
+ buf_desc->ctl = cpu_to_le32(tmp);
+
+ bcm4908_enet_dma_tx_ring_enable(enet, &enet->tx_ring);
+
+ if (++ring->write_idx == ring->length - 1)
+ ring->write_idx = 0;
+ enet->netdev->stats.tx_bytes += skb->len;
+ enet->netdev->stats.tx_packets++;
+
+ return NETDEV_TX_OK;
+}
+
+static int bcm4908_enet_poll(struct napi_struct *napi, int weight)
+{
+ struct bcm4908_enet *enet = container_of(napi, struct bcm4908_enet, napi);
+ struct device *dev = enet->dev;
+ int handled = 0;
+
+ while (handled < weight) {
+ struct bcm4908_enet_dma_ring_bd *buf_desc;
+ struct bcm4908_enet_dma_ring_slot slot;
+ u32 ctl;
+ int len;
+ int err;
+
+ buf_desc = &enet->rx_ring.buf_desc[enet->rx_ring.read_idx];
+ ctl = le32_to_cpu(buf_desc->ctl);
+ if (ctl & DMA_CTL_STATUS_OWN)
+ break;
+
+ slot = enet->rx_ring.slots[enet->rx_ring.read_idx];
+
+ /* Provide new buffer before unpinning the old one */
+ err = bcm4908_enet_dma_alloc_rx_buf(enet, enet->rx_ring.read_idx);
+ if (err)
+ break;
+
+ if (++enet->rx_ring.read_idx == enet->rx_ring.length)
+ enet->rx_ring.read_idx = 0;
+
+ len = (ctl & DMA_CTL_LEN_DESC_BUFLENGTH) >> DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;
+
+ if (len < ETH_ZLEN ||
+ (ctl & (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) != (DMA_CTL_STATUS_SOP | DMA_CTL_STATUS_EOP)) {
+ enet->netdev->stats.rx_dropped++;
+ break;
+ }
+
+ dma_unmap_single(dev, slot.dma_addr, slot.len, DMA_FROM_DEVICE);
+
+ skb_put(slot.skb, len - ETH_FCS_LEN);
+ slot.skb->protocol = eth_type_trans(slot.skb, enet->netdev);
+ netif_receive_skb(slot.skb);
+
+ enet->netdev->stats.rx_packets++;
+ enet->netdev->stats.rx_bytes += len;
+ }
+
+ if (handled < weight) {
+ napi_complete_done(napi, handled);
+ bcm4908_enet_intrs_on(enet);
+ }
+
+ return handled;
+}
+
+static int bcm4908_enet_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct bcm4908_enet *enet = netdev_priv(netdev);
+
+ bcm4908_enet_set_mtu(enet, new_mtu);
+
+ return 0;
+}
+
+static const struct net_device_ops bcm4908_enet_netdev_ops = {
+ .ndo_open = bcm4908_enet_open,
+ .ndo_stop = bcm4908_enet_stop,
+ .ndo_start_xmit = bcm4908_enet_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = bcm4908_enet_change_mtu,
+};
+
+static int bcm4908_enet_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct bcm4908_enet *enet;
+ int err;
+
+ netdev = devm_alloc_etherdev(dev, sizeof(*enet));
+ if (!netdev)
+ return -ENOMEM;
+
+ enet = netdev_priv(netdev);
+ enet->dev = dev;
+ enet->netdev = netdev;
+
+ enet->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(enet->base)) {
+ dev_err(dev, "Failed to map registers: %ld\n", PTR_ERR(enet->base));
+ return PTR_ERR(enet->base);
+ }
+
+ netdev->irq = platform_get_irq_byname(pdev, "rx");
+ if (netdev->irq < 0)
+ return netdev->irq;
+
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+
+ err = bcm4908_enet_dma_alloc(enet);
+ if (err)
+ return err;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ eth_hw_addr_random(netdev);
+ netdev->netdev_ops = &bcm4908_enet_netdev_ops;
+ netdev->min_mtu = ETH_ZLEN;
+ netdev->mtu = ETH_DATA_LEN;
+ netdev->max_mtu = ENET_MTU_MAX;
+ netif_napi_add(netdev, &enet->napi, bcm4908_enet_poll, 64);
+
+ err = register_netdev(netdev);
+ if (err) {
+ bcm4908_enet_dma_free(enet);
+ return err;
+ }
+
+ platform_set_drvdata(pdev, enet);
+
+ return 0;
+}
+
+static int bcm4908_enet_remove(struct platform_device *pdev)
+{
+ struct bcm4908_enet *enet = platform_get_drvdata(pdev);
+
+ unregister_netdev(enet->netdev);
+ netif_napi_del(&enet->napi);
+ bcm4908_enet_dma_free(enet);
+
+ return 0;
+}
+
+static const struct of_device_id bcm4908_enet_of_match[] = {
+ { .compatible = "brcm,bcm4908-enet"},
+ {},
+};
+
+static struct platform_driver bcm4908_enet_driver = {
+ .driver = {
+ .name = "bcm4908_enet",
+ .of_match_table = bcm4908_enet_of_match,
+ },
+ .probe = bcm4908_enet_probe,
+ .remove = bcm4908_enet_remove,
+};
+module_platform_driver(bcm4908_enet_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
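A note on the ring accounting used by bcm4908_enet_start_xmit() above: the driver treats the buffer descriptor ring as a producer/consumer pair of indices (write_idx/read_idx) and refuses to fill the last free descriptor (see the "Don't use the last empty buf descriptor" comment). A standalone sketch of that computation with the same field names; it simply restates the in-line logic and is not part of the patch:

static int bcm4908_enet_tx_free_bds(const struct bcm4908_enet_dma_ring *ring)
{
	int free_buf_descs;

	if (ring->read_idx <= ring->write_idx)
		free_buf_descs = ring->read_idx - ring->write_idx + ring->length;
	else
		free_buf_descs = ring->read_idx - ring->write_idx;

	/* start_xmit() refuses to queue a frame unless at least 2 are free */
	return free_buf_descs;
}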
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.h b/drivers/net/ethernet/broadcom/bcm4908_enet.h
new file mode 100644
index 000000000000..8a3ede2da537
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __BCM4908_ENET_H
+#define __BCM4908_ENET_H
+
+#define ENET_CONTROL 0x000
+#define ENET_MIB_CTRL 0x004
+#define ENET_MIB_CTRL_CLR_MIB 0x00000001
+#define ENET_RX_ERR_MASK 0x008
+#define ENET_MIB_MAX_PKT_SIZE 0x00C
+#define ENET_MIB_MAX_PKT_SIZE_VAL 0x00003fff
+#define ENET_DIAG_OUT 0x01c
+#define ENET_ENABLE_DROP_PKT 0x020
+#define ENET_IRQ_ENABLE 0x024
+#define ENET_IRQ_ENABLE_OVFL 0x00000001
+#define ENET_GMAC_STATUS 0x028
+#define ENET_GMAC_STATUS_ETH_SPEED_MASK 0x00000003
+#define ENET_GMAC_STATUS_ETH_SPEED_10 0x00000000
+#define ENET_GMAC_STATUS_ETH_SPEED_100 0x00000001
+#define ENET_GMAC_STATUS_ETH_SPEED_1000 0x00000002
+#define ENET_GMAC_STATUS_HD 0x00000004
+#define ENET_GMAC_STATUS_AUTO_CFG_EN 0x00000008
+#define ENET_GMAC_STATUS_LINK_UP 0x00000010
+#define ENET_IRQ_STATUS 0x02c
+#define ENET_IRQ_STATUS_OVFL 0x00000001
+#define ENET_OVERFLOW_COUNTER 0x030
+#define ENET_FLUSH 0x034
+#define ENET_FLUSH_RXFIFO_FLUSH 0x00000001
+#define ENET_FLUSH_TXFIFO_FLUSH 0x00000002
+#define ENET_RSV_SELECT 0x038
+#define ENET_BP_FORCE 0x03c
+#define ENET_BP_FORCE_FORCE 0x00000001
+#define ENET_DMA_RX_OK_TO_SEND_COUNT 0x040
+#define ENET_DMA_RX_OK_TO_SEND_COUNT_VAL 0x0000000f
+#define ENET_TX_CRC_CTRL 0x044
+#define ENET_MIB 0x200
+#define ENET_UNIMAC 0x400
+#define ENET_DMA 0x800
+#define ENET_DMA_CONTROLLER_CFG 0x800
+#define ENET_DMA_CTRL_CFG_MASTER_EN 0x00000001
+#define ENET_DMA_CTRL_CFG_FLOWC_CH1_EN 0x00000002
+#define ENET_DMA_CTRL_CFG_FLOWC_CH3_EN 0x00000004
+#define ENET_DMA_FLOWCTL_CH1_THRESH_LO 0x804
+#define ENET_DMA_FLOWCTL_CH1_THRESH_HI 0x808
+#define ENET_DMA_FLOWCTL_CH1_ALLOC 0x80c
+#define ENET_DMA_FLOWCTL_CH1_ALLOC_FORCE 0x80000000
+#define ENET_DMA_FLOWCTL_CH3_THRESH_LO 0x810
+#define ENET_DMA_FLOWCTL_CH3_THRESH_HI 0x814
+#define ENET_DMA_FLOWCTL_CH3_ALLOC 0x818
+#define ENET_DMA_FLOWCTL_CH5_THRESH_LO 0x81C
+#define ENET_DMA_FLOWCTL_CH5_THRESH_HI 0x820
+#define ENET_DMA_FLOWCTL_CH5_ALLOC 0x824
+#define ENET_DMA_FLOWCTL_CH7_THRESH_LO 0x828
+#define ENET_DMA_FLOWCTL_CH7_THRESH_HI 0x82C
+#define ENET_DMA_FLOWCTL_CH7_ALLOC 0x830
+#define ENET_DMA_CTRL_CHANNEL_RESET 0x834
+#define ENET_DMA_CTRL_CHANNEL_DEBUG 0x838
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_STATUS 0x840
+#define ENET_DMA_CTRL_GLOBAL_INTERRUPT_MASK 0x844
+#define ENET_DMA_CH0_CFG 0xa00 /* RX */
+#define ENET_DMA_CH1_CFG 0xa10 /* TX */
+#define ENET_DMA_CH0_STATE_RAM 0xc00 /* RX */
+#define ENET_DMA_CH1_STATE_RAM 0xc10 /* TX */
+
+#define ENET_DMA_CH_CFG 0x00 /* assorted configuration */
+#define ENET_DMA_CH_CFG_ENABLE 0x00000001 /* set to enable channel */
+#define ENET_DMA_CH_CFG_PKT_HALT 0x00000002 /* idle after an EOP flag is detected */
+#define ENET_DMA_CH_CFG_BURST_HALT 0x00000004 /* idle after finishing the current memory burst */
+#define ENET_DMA_CH_CFG_INT_STAT 0x04 /* interrupts control and status */
+#define ENET_DMA_CH_CFG_INT_MASK 0x08 /* interrupts mask */
+#define ENET_DMA_CH_CFG_INT_BUFF_DONE 0x00000001 /* buffer done */
+#define ENET_DMA_CH_CFG_INT_DONE 0x00000002 /* packet xfer complete */
+#define ENET_DMA_CH_CFG_INT_NO_DESC 0x00000004 /* no valid descriptors */
+#define ENET_DMA_CH_CFG_INT_RX_ERROR 0x00000008 /* rxdma detected a client protocol error */
+#define ENET_DMA_CH_CFG_MAX_BURST 0x0c /* max burst length permitted */
+#define ENET_DMA_CH_CFG_MAX_BURST_DESCSIZE_SEL 0x00040000 /* DMA Descriptor Size Selection */
+#define ENET_DMA_CH_CFG_SIZE 0x10
+
+#define ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR 0x00 /* descriptor ring start address */
+#define ENET_DMA_CH_STATE_RAM_STATE_DATA 0x04 /* state/bytes done/ring offset */
+#define ENET_DMA_CH_STATE_RAM_DESC_LEN_STATUS 0x08 /* buffer descriptor status and len */
+#define ENET_DMA_CH_STATE_RAM_DESC_BASE_BUFPTR 0x0c /* buffer descriptor currently being processed */
+#define ENET_DMA_CH_STATE_RAM_SIZE 0x10
+
+#define DMA_CTL_STATUS_APPEND_CRC 0x00000100
+#define DMA_CTL_STATUS_APPEND_BRCM_TAG 0x00000200
+#define DMA_CTL_STATUS_PRIO 0x00000C00 /* Prio for Tx */
+#define DMA_CTL_STATUS_WRAP 0x00001000 /* last descriptor in ring: wrap to start */
+#define DMA_CTL_STATUS_SOP 0x00002000 /* first buffer in packet */
+#define DMA_CTL_STATUS_EOP 0x00004000 /* last buffer in packet */
+#define DMA_CTL_STATUS_OWN 0x00008000 /* cleared by DMA, set by SW */
+#define DMA_CTL_LEN_DESC_BUFLENGTH 0x0fff0000
+#define DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT 16
+#define DMA_CTL_LEN_DESC_MULTICAST 0x40000000
+#define DMA_CTL_LEN_DESC_USEFPM 0x80000000
+
+#endif
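The DMA_CTL_* definitions at the end of this header describe how the driver packs a buffer descriptor's 32-bit ctl word: the buffer length lives in bits 16-27 (DMA_CTL_LEN_DESC_BUFLENGTH) and the status/command flags sit below it, with DMA_CTL_STATUS_OWN handing the descriptor to hardware. A small illustration mirroring the usage in bcm4908_enet_dma_alloc_rx_buf() and bcm4908_enet_start_xmit(); the helper name is invented for the example:

static u32 bcm4908_enet_bd_ctl(unsigned int len, bool own, bool wrap)
{
	u32 ctl = len << DMA_CTL_LEN_DESC_BUFLENGTH_SHIFT;

	if (own)
		ctl |= DMA_CTL_STATUS_OWN;	/* descriptor belongs to the DMA engine */
	if (wrap)
		ctl |= DMA_CTL_STATUS_WRAP;	/* last descriptor of the ring */

	return ctl;
}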
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 916824cca3fd..fd8767213165 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -220,7 +220,7 @@ static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
/*
* refill rx queue
*/
-static int bcm_enet_refill_rx(struct net_device *dev)
+static int bcm_enet_refill_rx(struct net_device *dev, bool napi_mode)
{
struct bcm_enet_priv *priv;
@@ -228,26 +228,29 @@ static int bcm_enet_refill_rx(struct net_device *dev)
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm_enet_desc *desc;
- struct sk_buff *skb;
- dma_addr_t p;
int desc_idx;
u32 len_stat;
desc_idx = priv->rx_dirty_desc;
desc = &priv->rx_desc_cpu[desc_idx];
- if (!priv->rx_skb[desc_idx]) {
- skb = netdev_alloc_skb(dev, priv->rx_skb_size);
- if (!skb)
+ if (!priv->rx_buf[desc_idx]) {
+ void *buf;
+
+ if (likely(napi_mode))
+ buf = napi_alloc_frag(priv->rx_frag_size);
+ else
+ buf = netdev_alloc_frag(priv->rx_frag_size);
+ if (unlikely(!buf))
break;
- priv->rx_skb[desc_idx] = skb;
- p = dma_map_single(&priv->pdev->dev, skb->data,
- priv->rx_skb_size,
- DMA_FROM_DEVICE);
- desc->address = p;
+ priv->rx_buf[desc_idx] = buf;
+ desc->address = dma_map_single(&priv->pdev->dev,
+ buf + priv->rx_buf_offset,
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
}
- len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+ len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
len_stat |= DMADESC_OWNER_MASK;
if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
@@ -287,7 +290,7 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
struct net_device *dev = priv->net_dev;
spin_lock(&priv->rx_lock);
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, false);
spin_unlock(&priv->rx_lock);
}
@@ -297,10 +300,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
static int bcm_enet_receive_queue(struct net_device *dev, int budget)
{
struct bcm_enet_priv *priv;
+ struct list_head rx_list;
struct device *kdev;
int processed;
priv = netdev_priv(dev);
+ INIT_LIST_HEAD(&rx_list);
kdev = &priv->pdev->dev;
processed = 0;
@@ -315,6 +320,7 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
int desc_idx;
u32 len_stat;
unsigned int len;
+ void *buf;
desc_idx = priv->rx_curr_desc;
desc = &priv->rx_desc_cpu[desc_idx];
@@ -333,7 +339,6 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
priv->rx_curr_desc++;
if (priv->rx_curr_desc == priv->rx_ring_size)
priv->rx_curr_desc = 0;
- priv->rx_desc_count--;
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
@@ -360,16 +365,14 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
}
/* valid packet */
- skb = priv->rx_skb[desc_idx];
+ buf = priv->rx_buf[desc_idx];
len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
/* don't include FCS */
len -= 4;
if (len < copybreak) {
- struct sk_buff *nskb;
-
- nskb = napi_alloc_skb(&priv->napi, len);
- if (!nskb) {
+ skb = napi_alloc_skb(&priv->napi, len);
+ if (unlikely(!skb)) {
/* forget packet, just rearm desc */
dev->stats.rx_dropped++;
continue;
@@ -377,26 +380,36 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
dma_sync_single_for_cpu(kdev, desc->address,
len, DMA_FROM_DEVICE);
- memcpy(nskb->data, skb->data, len);
+ memcpy(skb->data, buf + priv->rx_buf_offset, len);
dma_sync_single_for_device(kdev, desc->address,
len, DMA_FROM_DEVICE);
- skb = nskb;
} else {
- dma_unmap_single(&priv->pdev->dev, desc->address,
- priv->rx_skb_size, DMA_FROM_DEVICE);
- priv->rx_skb[desc_idx] = NULL;
+ dma_unmap_single(kdev, desc->address,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ priv->rx_buf[desc_idx] = NULL;
+
+ skb = build_skb(buf, priv->rx_frag_size);
+ if (unlikely(!skb)) {
+ skb_free_frag(buf);
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, priv->rx_buf_offset);
}
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
- netif_receive_skb(skb);
+ list_add_tail(&skb->list, &rx_list);
- } while (--budget > 0);
+ } while (processed < budget);
+
+ netif_receive_skb_list(&rx_list);
+ priv->rx_desc_count -= processed;
if (processed || !priv->rx_desc_count) {
- bcm_enet_refill_rx(dev);
+ bcm_enet_refill_rx(dev, true);
/* kick rx dma */
enet_dmac_writel(priv, priv->dma_chan_en_mask,
@@ -413,9 +426,11 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
{
struct bcm_enet_priv *priv;
+ unsigned int bytes;
int released;
priv = netdev_priv(dev);
+ bytes = 0;
released = 0;
while (priv->tx_desc_count < priv->tx_ring_size) {
@@ -452,10 +467,13 @@ static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
if (desc->len_stat & DMADESC_UNDER_MASK)
dev->stats.tx_errors++;
+ bytes += skb->len;
dev_kfree_skb(skb);
released++;
}
+ netdev_completed_queue(dev, released, bytes);
+
if (netif_queue_stopped(dev) && released)
netif_wake_queue(dev);
@@ -622,8 +640,11 @@ bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc->len_stat = len_stat;
wmb();
+ netdev_sent_queue(dev, skb->len);
+
/* kick tx dma */
- enet_dmac_writel(priv, priv->dma_chan_en_mask,
+ if (!netdev_xmit_more() || !priv->tx_desc_count)
+ enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->tx_chan);
/* stop queue if no more desc available */
@@ -845,6 +866,24 @@ static void bcm_enet_adjust_link(struct net_device *dev)
priv->pause_tx ? "tx" : "off");
}
+static void bcm_enet_free_rx_buf_ring(struct device *kdev, struct bcm_enet_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->rx_ring_size; i++) {
+ struct bcm_enet_desc *desc;
+
+ if (!priv->rx_buf[i])
+ continue;
+
+ desc = &priv->rx_desc_cpu[i];
+ dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ skb_free_frag(priv->rx_buf[i]);
+ }
+ kfree(priv->rx_buf);
+}
+
/*
* open callback, allocate dma rings & buffers and start rx operation
*/
@@ -954,10 +993,10 @@ static int bcm_enet_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
+ if (!priv->rx_buf) {
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -974,8 +1013,8 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMAC_BUFALLOC, priv->rx_chan);
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -1069,18 +1108,7 @@ static int bcm_enet_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -1159,12 +1187,12 @@ static int bcm_enet_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
if (priv->has_phy)
phy_stop(dev->phydev);
@@ -1186,21 +1214,10 @@ static int bcm_enet_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -1622,9 +1639,12 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
* align rx buffer size to dma burst len, account FCS since
* it's appended
*/
- priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+ priv->rx_buf_size = ALIGN(actual_mtu + ETH_FCS_LEN,
priv->dma_maxburst * 4);
+ priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
dev->mtu = new_mtu;
return 0;
}
@@ -1709,6 +1729,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->enet_is_sw = false;
priv->dma_maxburst = BCMENET_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD;
ret = bcm_enet_change_mtu(dev, dev->mtu);
if (ret)
@@ -2126,7 +2147,7 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_skb = kcalloc(priv->tx_ring_size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ dev_err(kdev, "cannot allocate tx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
@@ -2136,11 +2157,11 @@ static int bcm_enetsw_open(struct net_device *dev)
priv->tx_curr_desc = 0;
spin_lock_init(&priv->tx_lock);
- /* init & fill rx ring with skbs */
- priv->rx_skb = kcalloc(priv->rx_ring_size, sizeof(struct sk_buff *),
+ /* init & fill rx ring with buffers */
+ priv->rx_buf = kcalloc(priv->rx_ring_size, sizeof(void *),
GFP_KERNEL);
- if (!priv->rx_skb) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (!priv->rx_buf) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
@@ -2187,8 +2208,8 @@ static int bcm_enetsw_open(struct net_device *dev)
enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
ENETDMA_BUFALLOC_REG(priv->rx_chan));
- if (bcm_enet_refill_rx(dev)) {
- dev_err(kdev, "cannot allocate rx skb queue\n");
+ if (bcm_enet_refill_rx(dev, false)) {
+ dev_err(kdev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
@@ -2287,18 +2308,7 @@ static int bcm_enetsw_open(struct net_device *dev)
return 0;
out:
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
- kfree(priv->rx_skb);
+ bcm_enet_free_rx_buf_ring(kdev, priv);
out_free_tx_skb:
kfree(priv->tx_skb);
@@ -2327,13 +2337,13 @@ static int bcm_enetsw_stop(struct net_device *dev)
{
struct bcm_enet_priv *priv;
struct device *kdev;
- int i;
priv = netdev_priv(dev);
kdev = &priv->pdev->dev;
del_timer_sync(&priv->swphy_poll);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
napi_disable(&priv->napi);
del_timer_sync(&priv->rx_timeout);
@@ -2348,21 +2358,10 @@ static int bcm_enetsw_stop(struct net_device *dev)
/* force reclaim of all tx buffers */
bcm_enet_tx_reclaim(dev, 1);
- /* free the rx skb ring */
- for (i = 0; i < priv->rx_ring_size; i++) {
- struct bcm_enet_desc *desc;
-
- if (!priv->rx_skb[i])
- continue;
-
- desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
- DMA_FROM_DEVICE);
- kfree_skb(priv->rx_skb[i]);
- }
+ /* free the rx buffer ring */
+ bcm_enet_free_rx_buf_ring(kdev, priv);
/* free remaining allocated memory */
- kfree(priv->rx_skb);
kfree(priv->tx_skb);
dma_free_coherent(kdev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
@@ -2659,6 +2658,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
+ priv->rx_buf_offset = NET_SKB_PAD + NET_IP_ALIGN;
pd = dev_get_platdata(&pdev->dev);
if (pd) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 1d3c917eb830..78f1830fb3cb 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -230,11 +230,17 @@ struct bcm_enet_priv {
/* next dirty rx descriptor to refill */
int rx_dirty_desc;
- /* size of allocated rx skbs */
- unsigned int rx_skb_size;
+ /* size of allocated rx buffers */
+ unsigned int rx_buf_size;
- /* list of skb given to hw for rx */
- struct sk_buff **rx_skb;
+ /* allocated rx buffer offset */
+ unsigned int rx_buf_offset;
+
+ /* size of allocated rx frag */
+ unsigned int rx_frag_size;
+
+ /* list of buffers given to hw for rx */
+ void **rx_buf;
/* used when rx skb allocation failed, so we defer rx queue
* refill */
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0404aafd5ce5..777bbf6d2586 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -2310,33 +2311,22 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
.ndo_select_queue = bcm_sysport_select_queue,
};
-static int bcm_sysport_map_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_map_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
unsigned int q, qp, port;
- struct net_device *dev;
-
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
/* We can't be setting up queue inspection for non directly attached
* switches
*/
- if (info->switch_number)
+ if (dp->ds->index)
return 0;
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
 /* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
* 1:1 mapping, we can only do a 2:1 mapping. By reducing the number of
@@ -2376,27 +2366,16 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_unmap_queues(struct notifier_block *nb,
- struct dsa_notifier_register_info *info)
+static int bcm_sysport_unmap_queues(struct net_device *dev,
+ struct net_device *slave_dev)
{
+ struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
struct bcm_sysport_tx_ring *ring;
- struct bcm_sysport_priv *priv;
- struct net_device *slave_dev;
unsigned int num_tx_queues;
- struct net_device *dev;
unsigned int q, qp, port;
- priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
- if (priv->netdev != info->master)
- return 0;
-
- dev = info->master;
-
- if (dev->netdev_ops != &bcm_sysport_netdev_ops)
- return 0;
-
- port = info->port_number;
- slave_dev = info->info.dev;
+ port = dp->index;
num_tx_queues = slave_dev->real_num_tx_queues;
@@ -2417,17 +2396,30 @@ static int bcm_sysport_unmap_queues(struct notifier_block *nb,
return 0;
}
-static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
- unsigned long event, void *ptr)
+static int bcm_sysport_netdevice_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
{
- int ret = NOTIFY_DONE;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct bcm_sysport_priv *priv;
+ int ret = 0;
+
+ priv = container_of(nb, struct bcm_sysport_priv, netdev_notifier);
+ if (priv->netdev != dev)
+ return NOTIFY_DONE;
switch (event) {
- case DSA_PORT_REGISTER:
- ret = bcm_sysport_map_queues(nb, ptr);
- break;
- case DSA_PORT_UNREGISTER:
- ret = bcm_sysport_unmap_queues(nb, ptr);
+ case NETDEV_CHANGEUPPER:
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return NOTIFY_DONE;
+
+ if (!dsa_slave_dev_check(info->upper_dev))
+ return NOTIFY_DONE;
+
+ if (info->linking)
+ ret = bcm_sysport_map_queues(dev, info->upper_dev);
+ else
+ ret = bcm_sysport_unmap_queues(dev, info->upper_dev);
break;
}
@@ -2602,9 +2594,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rx_max_coalesced_frames = 1;
u64_stats_init(&priv->syncp);
- priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
+ priv->netdev_notifier.notifier_call = bcm_sysport_netdevice_event;
- ret = register_dsa_notifier(&priv->dsa_notifier);
+ ret = register_netdevice_notifier(&priv->netdev_notifier);
if (ret) {
dev_err(&pdev->dev, "failed to register DSA notifier\n");
goto err_deregister_fixed_link;
@@ -2631,7 +2623,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
return 0;
err_deregister_notifier:
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
err_deregister_fixed_link:
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
@@ -2649,7 +2641,7 @@ static int bcm_sysport_remove(struct platform_device *pdev)
/* Not much to do, ndo_close has been called
* and we use managed allocations
*/
- unregister_dsa_notifier(&priv->dsa_notifier);
+ unregister_netdevice_notifier(&priv->netdev_notifier);
unregister_netdev(dev);
if (of_phy_is_fixed_link(dn))
of_phy_deregister_fixed_link(dn);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 3a5cb6f128f5..984f76e74b43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/dim.h>
+#include "unimac.h"
+
/* Receive/transmit descriptor format */
#define DESC_ADDR_HI_STATUS_LEN 0x00
#define DESC_ADDR_HI_SHIFT 0
@@ -213,39 +215,6 @@ struct bcm_rsb {
/* UniMAC offset and defines */
#define SYS_PORT_UMAC_OFFSET 0x800
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_10 0
-#define CMD_SPEED_100 1
-#define CMD_SPEED_1000 2
-#define CMD_SPEED_2500 3
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00c
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
/* There is a 0xC gap between the end of RX and beginning of TX stats and then
@@ -787,7 +756,7 @@ struct bcm_sysport_priv {
struct u64_stats_sync syncp;
/* map information between switch port queues and local queues */
- struct notifier_block dsa_notifier;
+ struct notifier_block netdev_notifier;
unsigned int per_port_num_tx_queues;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 98ec1b8a7d8e..075f6e146b29 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -746,25 +746,25 @@ error:
 /* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
*/
-static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
- bool force)
+static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
+ bool force)
{
- u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
+ u32 cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
u32 new_val = (cmdcfg & mask) | set;
u32 cmdcfg_sr;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~0, cmdcfg_sr);
udelay(2);
if (new_val != cmdcfg || force)
- bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
+ bgmac_umac_write(bgmac, UMAC_CMD, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
+ bgmac_umac_maskset(bgmac, UMAC_CMD, ~cmdcfg_sr, 0);
udelay(2);
}
@@ -773,9 +773,9 @@ static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
u32 tmp;
tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
- bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC0, tmp);
tmp = (addr[4] << 8) | addr[5];
- bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
+ bgmac_umac_write(bgmac, UMAC_MAC1, tmp);
}
static void bgmac_set_rx_mode(struct net_device *net_dev)
@@ -783,9 +783,9 @@ static void bgmac_set_rx_mode(struct net_device *net_dev)
struct bgmac *bgmac = netdev_priv(net_dev);
if (net_dev->flags & IFF_PROMISC)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_PROMISC, true);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_PROMISC, 0, true);
}
 #if 0 /* We don't use these regs yet */
@@ -825,21 +825,21 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
- u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
+ u32 mask = ~(CMD_SPEED_MASK << CMD_SPEED_SHIFT | CMD_HD_EN);
u32 set = 0;
switch (bgmac->mac_speed) {
case SPEED_10:
- set |= BGMAC_CMDCFG_ES_10;
+ set |= CMD_SPEED_10 << CMD_SPEED_SHIFT;
break;
case SPEED_100:
- set |= BGMAC_CMDCFG_ES_100;
+ set |= CMD_SPEED_100 << CMD_SPEED_SHIFT;
break;
case SPEED_1000:
- set |= BGMAC_CMDCFG_ES_1000;
+ set |= CMD_SPEED_1000 << CMD_SPEED_SHIFT;
break;
case SPEED_2500:
- set |= BGMAC_CMDCFG_ES_2500;
+ set |= CMD_SPEED_2500 << CMD_SPEED_SHIFT;
break;
default:
dev_err(bgmac->dev, "Unsupported speed: %d\n",
@@ -847,9 +847,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
}
if (bgmac->mac_duplex == DUPLEX_HALF)
- set |= BGMAC_CMDCFG_HD;
+ set |= CMD_HD_EN;
- bgmac_cmdcfg_maskset(bgmac, mask, set, true);
+ bgmac_umac_cmd_maskset(bgmac, mask, set, true);
}
static void bgmac_miiconfig(struct bgmac *bgmac)
@@ -917,7 +917,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
udelay(1);
for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
@@ -986,34 +986,34 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
- * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
- * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to
+ * The specs don't mention using UMAC_CMD_SR, but in this routine
+ * UMAC_CMD is read _after_ putting the chip in reset. So it has to
 * be kept until taking the MAC out of the reset.
*/
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
-
- bgmac_cmdcfg_maskset(bgmac,
- ~(BGMAC_CMDCFG_TE |
- BGMAC_CMDCFG_RE |
- BGMAC_CMDCFG_RPI |
- BGMAC_CMDCFG_TAI |
- BGMAC_CMDCFG_HD |
- BGMAC_CMDCFG_ML |
- BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_RL |
- BGMAC_CMDCFG_RED |
- BGMAC_CMDCFG_PE |
- BGMAC_CMDCFG_TPI |
- BGMAC_CMDCFG_PAD_EN |
- BGMAC_CMDCFG_PF),
- BGMAC_CMDCFG_PROM |
- BGMAC_CMDCFG_NLC |
- BGMAC_CMDCFG_CFE |
- cmdcfg_sr,
- false);
+ cmdcfg_sr = CMD_SW_RESET_OLD;
+
+ bgmac_umac_cmd_maskset(bgmac,
+ ~(CMD_TX_EN |
+ CMD_RX_EN |
+ CMD_RX_PAUSE_IGNORE |
+ CMD_TX_ADDR_INS |
+ CMD_HD_EN |
+ CMD_LCL_LOOP_EN |
+ CMD_CNTL_FRM_EN |
+ CMD_RMT_LOOP_EN |
+ CMD_RX_ERR_DISC |
+ CMD_PRBL_EN |
+ CMD_TX_PAUSE_IGNORE |
+ CMD_PAD_EN |
+ CMD_PAUSE_FWD),
+ CMD_PROMISC |
+ CMD_NO_LEN_CHK |
+ CMD_CNTL_FRM_EN |
+ cmdcfg_sr,
+ false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
@@ -1049,16 +1049,16 @@ static void bgmac_enable(struct bgmac *bgmac)
u32 mode;
if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ cmdcfg_sr = CMD_SW_RESET;
else
- cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+ cmdcfg_sr = CMD_SW_RESET_OLD;
- cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
- bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- cmdcfg_sr, true);
+ cmdcfg = bgmac_umac_read(bgmac, UMAC_CMD);
+ bgmac_umac_cmd_maskset(bgmac, ~(CMD_TX_EN | CMD_RX_EN),
+ cmdcfg_sr, true);
udelay(2);
- cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
- bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
+ cmdcfg |= CMD_TX_EN | CMD_RX_EN;
+ bgmac_umac_write(bgmac, UMAC_CMD, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
@@ -1078,7 +1078,7 @@ static void bgmac_enable(struct bgmac *bgmac)
fl_ctl = 0x03cb04cb;
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
- bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
+ bgmac_umac_write(bgmac, UMAC_PAUSE_CTRL, 0x27fff);
}
if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
@@ -1105,18 +1105,18 @@ static void bgmac_chip_init(struct bgmac *bgmac)
bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
/* Enable 802.3x tx flow control (honor received PAUSE frames) */
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_RX_PAUSE_IGNORE, 0, true);
bgmac_set_rx_mode(bgmac->net_dev);
bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);
if (bgmac->loopback)
- bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
+ bgmac_umac_cmd_maskset(bgmac, ~0, CMD_LCL_LOOP_EN, false);
else
- bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);
+ bgmac_umac_cmd_maskset(bgmac, ~CMD_LCL_LOOP_EN, 0, false);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + ETHER_MAX_LEN);
bgmac_chip_intrs_on(bgmac);
@@ -1252,7 +1252,7 @@ static int bgmac_change_mtu(struct net_device *net_dev, int mtu)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + mtu);
+ bgmac_umac_write(bgmac, UMAC_MAX_FRAME_LEN, 32 + mtu);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 351c598a3ec6..110088e662ea 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -4,6 +4,8 @@
#include <linux/netdevice.h>
+#include "unimac.h"
+
#define BGMAC_DEV_CTL 0x000
#define BGMAC_DC_TSM 0x00000002
#define BGMAC_DC_CFCO 0x00000004
@@ -169,47 +171,7 @@
#define BGMAC_RX_NONPAUSE_PKTS 0x420
#define BGMAC_RX_SACHANGES 0x424
#define BGMAC_RX_UNI_PKTS 0x428
-#define BGMAC_UNIMAC_VERSION 0x800
-#define BGMAC_HDBKP_CTL 0x804
-#define BGMAC_CMDCFG 0x808 /* Configuration */
-#define BGMAC_CMDCFG_TE 0x00000001 /* Set to activate TX */
-#define BGMAC_CMDCFG_RE 0x00000002 /* Set to activate RX */
-#define BGMAC_CMDCFG_ES_MASK 0x0000000c /* Ethernet speed see gmac_speed */
-#define BGMAC_CMDCFG_ES_10 0x00000000
-#define BGMAC_CMDCFG_ES_100 0x00000004
-#define BGMAC_CMDCFG_ES_1000 0x00000008
-#define BGMAC_CMDCFG_ES_2500 0x0000000C
-#define BGMAC_CMDCFG_PROM 0x00000010 /* Set to activate promiscuous mode */
-#define BGMAC_CMDCFG_PAD_EN 0x00000020
-#define BGMAC_CMDCFG_CF 0x00000040
-#define BGMAC_CMDCFG_PF 0x00000080
-#define BGMAC_CMDCFG_RPI 0x00000100 /* Unset to enable 802.3x tx flow control */
-#define BGMAC_CMDCFG_TAI 0x00000200
-#define BGMAC_CMDCFG_HD 0x00000400 /* Set if in half duplex mode */
-#define BGMAC_CMDCFG_HD_SHIFT 10
-#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
-#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
-#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
-#define BGMAC_CMDCFG_AE 0x00400000
-#define BGMAC_CMDCFG_CFE 0x00800000
-#define BGMAC_CMDCFG_NLC 0x01000000
-#define BGMAC_CMDCFG_RL 0x02000000
-#define BGMAC_CMDCFG_RED 0x04000000
-#define BGMAC_CMDCFG_PE 0x08000000
-#define BGMAC_CMDCFG_TPI 0x10000000
-#define BGMAC_CMDCFG_AT 0x20000000
-#define BGMAC_MACADDR_HIGH 0x80c /* High 4 octets of own mac address */
-#define BGMAC_MACADDR_LOW 0x810 /* Low 2 octets of own mac address */
-#define BGMAC_RXMAX_LENGTH 0x814 /* Max receive frame length with vlan tag */
-#define BGMAC_PAUSEQUANTA 0x818
-#define BGMAC_MAC_MODE 0x844
-#define BGMAC_OUTERTAG 0x848
-#define BGMAC_INNERTAG 0x84c
-#define BGMAC_TXIPG 0x85c
-#define BGMAC_PAUSE_CTL 0xb30
-#define BGMAC_TX_FLUSH 0xb34
-#define BGMAC_RX_STATUS 0xb38
-#define BGMAC_TX_STATUS 0xb3c
+#define BGMAC_UNIMAC 0x800
/* BCMA GMAC core specific IO Control (BCMA_IOCTL) flags */
#define BGMAC_BCMA_IOCTL_SW_CLKEN 0x00000004 /* PHY Clock Enable */
@@ -556,6 +518,16 @@ static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
bgmac->write(bgmac, offset, value);
}
+static inline u32 bgmac_umac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bgmac_read(bgmac, BGMAC_UNIMAC + offset);
+}
+
+static inline void bgmac_umac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bgmac_write(bgmac, BGMAC_UNIMAC + offset, value);
+}
+
static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset)
{
return bgmac->idm_read(bgmac, offset);
@@ -609,6 +581,11 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
bgmac_maskset(bgmac, offset, ~0, set);
}
+static inline void bgmac_umac_maskset(struct bgmac *bgmac, u16 offset, u32 mask, u32 set)
+{
+ bgmac_maskset(bgmac, BGMAC_UNIMAC + offset, mask, set);
+}
+
static inline int bgmac_phy_connect(struct bgmac *bgmac)
{
return bgmac->phy_connect(bgmac);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 28069b290862..b652ed72a621 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13071,8 +13071,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index d10e4f85dd11..a680fd9c68ea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -255,7 +255,9 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+ ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -1265,8 +1267,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
} else {
tpa_info->hash_type = PKT_HASH_TYPE_NONE;
tpa_info->gso_type = 0;
- if (netif_msg_rx_err(bp))
- netdev_warn(bp->dev, "TPA packet without valid hash\n");
+ netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
@@ -2021,10 +2022,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
- case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
- if (netif_msg_hw(bp))
- netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
- data1, data2);
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ char *fatal_str = "non-fatal";
+
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2036,14 +2036,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- netdev_warn(bp->dev, "Firmware fatal reset event received\n");
+ fatal_str = "fatal";
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
- } else {
- netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
- bp->fw_reset_max_dsecs * 100);
}
+ netif_warn(bp, hw, bp->dev,
+ "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ fatal_str, data1, data2,
+ bp->fw_reset_min_dsecs * 100,
+ bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
+ }
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -2052,16 +2055,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
- if (!fw_health->enabled)
+ if (!fw_health->enabled) {
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[0]\n");
break;
-
- if (netif_msg_drv(bp))
- netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
- fw_health->enabled, fw_health->master,
- bnxt_fw_health_readl(bp,
- BNXT_FW_RESET_CNT_REG),
- bnxt_fw_health_readl(bp,
- BNXT_FW_HEALTH_REG));
+ }
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2070,8 +2068,17 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ netif_info(bp, drv, bp->dev,
+ "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
+ fw_health->master, fw_health->last_fw_reset_cnt,
+ bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ goto async_event_process_exit;
case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
struct bnxt_rx_ring_info *rxr;
u16 grp_idx;
@@ -2094,6 +2101,20 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_sched_reset(bp, rxr);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+
+ netif_notice(bp, hw, bp->dev,
+ "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
+ if (fw_health) {
+ fw_health->echo_req_data1 = data1;
+ fw_health->echo_req_data2 = data2;
+ set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
@@ -2394,6 +2415,10 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
while (1) {
work_done += bnxt_poll_work(bp, cpr, budget - work_done);
@@ -2468,6 +2493,10 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
int work_done = 0;
u32 cons;
+ if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
+ napi_complete(napi);
+ return 0;
+ }
if (cpr->has_more_work) {
cpr->has_more_work = 0;
work_done = __bnxt_poll_cqs(bp, bnapi, budget);
@@ -2675,6 +2704,23 @@ static void bnxt_free_skbs(struct bnxt *bp)
bnxt_free_rx_skbs(bp);
}
+static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+{
+ u8 init_val = mem_init->init_val;
+ u16 offset = mem_init->offset;
+ u8 *p2 = p;
+ int i;
+
+ if (!init_val)
+ return;
+ if (offset == BNXT_MEM_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += mem_init->size)
+ *(p2 + i + offset) = init_val;
+}
+
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
struct pci_dev *pdev = bp->pdev;
@@ -2734,9 +2780,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
if (!rmem->pg_arr[i])
return -ENOMEM;
- if (rmem->init_val)
- memset(rmem->pg_arr[i], rmem->init_val,
- rmem->page_size);
+ if (rmem->mem_init)
+ bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
(rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -4272,6 +4318,9 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
+ if (!bp->irq_tbl)
+ return;
+
atomic_inc(&bp->intr_sem);
bnxt_disable_int(bp);
@@ -4425,6 +4474,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
if (!timeout)
timeout = DFLT_HWRM_CMD_TIMEOUT;
+ /* Limit timeout to an upper limit */
+ timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT);
/* convert timeout to usec */
timeout *= 1000;
@@ -6732,6 +6783,39 @@ func_qcfg_exit:
return rc;
}
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
+ struct hwrm_func_backing_store_qcaps_output *resp)
+{
+ struct bnxt_mem_init *mem_init;
+ u16 init_mask;
+ u8 init_val;
+ u8 *offset;
+ int i;
+
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+ offset = &resp->qp_init_offset;
+ mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
+ mem_init->init_val = init_val;
+ mem_init->offset = BNXT_MEM_INVALID_OFFSET;
+ if (!init_mask)
+ continue;
+ if (i == BNXT_CTX_MEM_INIT_STAT)
+ offset = &resp->stat_init_offset;
+ if (init_mask & (1 << i))
+ mem_init->offset = *offset * 4;
+ else
+ mem_init->init_val = 0;
+ }
+ ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
+ ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
+}
+
static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -6786,7 +6870,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
le16_to_cpu(resp->mrav_num_entries_units);
ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
- ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+
+ bnxt_init_ctx_initializer(ctx, resp);
+
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
@@ -6816,6 +6902,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
{
u8 pg_size = 0;
+ if (!rmem->nr_pages)
+ return;
+
if (BNXT_PAGE_SHIFT == 13)
pg_size = 1 << 4;
else if (BNXT_PAGE_SIZE == 16)
@@ -6845,6 +6934,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
struct hwrm_func_backing_store_cfg_input req = {0};
struct bnxt_ctx_mem_info *ctx = bp->ctx;
struct bnxt_ctx_pg_info *ctx_pg;
+ u32 req_len = sizeof(req);
__le32 *num_entries;
__le64 *pg_dir;
u32 flags = 0;
@@ -6855,6 +6945,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
if (!ctx)
return 0;
+ if (req_len > bp->hwrm_max_ext_req_len)
+ req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
req.enables = cpu_to_le32(enables);
@@ -6938,7 +7030,7 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
req.flags = cpu_to_le32(flags);
- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT);
}
static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
@@ -6957,7 +7049,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
- u8 depth, bool use_init_val)
+ u8 depth, struct bnxt_mem_init *mem_init)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
@@ -6995,8 +7087,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -7011,8 +7102,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- if (use_init_val)
- rmem->init_val = bp->ctx->ctx_kind_initializer;
+ rmem->mem_init = mem_init;
rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
}
return rc;
@@ -7076,6 +7166,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_pg_info *ctx_pg;
struct bnxt_ctx_mem_info *ctx;
+ struct bnxt_mem_init *init;
u32 mem_size, ena, entries;
u32 entries_sp, min;
u32 num_mr, num_ah;
@@ -7103,39 +7194,54 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->qp_mem;
ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
extra_qps;
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->qp_entry_size) {
+ mem_size = ctx->qp_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->srq_mem;
ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->srq_entry_size) {
+ mem_size = ctx->srq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->cq_mem;
ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
- if (rc)
- return rc;
+ if (ctx->cq_entry_size) {
+ mem_size = ctx->cq_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->vnic_mem;
ctx_pg->entries = ctx->vnic_max_vnic_entries +
ctx->vnic_max_ring_table_entries;
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->vnic_entry_size) {
+ mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ctx_pg = &ctx->stat_mem;
ctx_pg->entries = ctx->stat_max_entries;
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
- if (rc)
- return rc;
+ if (ctx->stat_entry_size) {
+ mem_size = ctx->stat_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
+ if (rc)
+ return rc;
+ }
ena = 0;
if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -7148,10 +7254,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
num_mr = 1024 * 256;
num_ah = 1024 * 128;
ctx_pg->entries = num_mr + num_ah;
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
- if (rc)
- return rc;
+ if (ctx->mrav_entry_size) {
+ mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+ init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
+ if (rc)
+ return rc;
+ }
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
if (ctx->mrav_num_entries_units)
ctx_pg->entries =
@@ -7160,10 +7269,12 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
ctx_pg = &ctx->tim_mem;
ctx_pg->entries = ctx->qp_mem.entries;
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tim_entry_size) {
+ mem_size = ctx->tim_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
skip_rdma:
@@ -7177,10 +7288,13 @@ skip_rdma:
for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
ctx_pg = ctx->tqm_mem[i];
ctx_pg->entries = i ? entries : entries_sp;
- mem_size = ctx->tqm_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
- if (rc)
- return rc;
+ if (ctx->tqm_entry_size) {
+ mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+ rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
+ NULL);
+ if (rc)
+ return rc;
+ }
ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
}
ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
@@ -7438,9 +7552,22 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
- if (bp->fw_health)
- bp->fw_health->status_reliable = false;
- return;
+ if (!bp->chip_num) {
+ __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
+ bp->chip_num = readl(bp->bar0 +
+ BNXT_FW_HEALTH_WIN_BASE +
+ BNXT_GRC_REG_CHIP_NUM);
+ }
+ if (!BNXT_CHIP_P5(bp)) {
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+ return;
+ }
+ status_loc = BNXT_GRC_REG_STATUS_P5 |
+ BNXT_FW_HEALTH_REG_TYPE_BAR0;
+ } else {
+ status_loc = readl(hs + offsetof(struct hcomm_status,
+ fw_status_loc));
}
if (__bnxt_alloc_fw_health(bp)) {
@@ -7448,7 +7575,6 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
return;
}
- status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
@@ -8603,7 +8729,7 @@ msix_setup_exit:
static int bnxt_init_inta(struct bnxt *bp)
{
- bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
+ bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
if (!bp->irq_tbl)
return -ENOMEM;
@@ -8811,7 +8937,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
{
int i;
- if (!bp->bnapi)
+ if (!bp->bnapi ||
+ test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -8828,6 +8955,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
{
int i;
+ clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr;
@@ -8856,9 +8984,10 @@ void bnxt_tx_disable(struct bnxt *bp)
txr->dev_state = BNXT_DEV_STATE_CLOSING;
}
}
+ /* Drop carrier first to prevent TX timeout */
+ netif_carrier_off(bp->dev);
/* Stop all TX queues */
netif_tx_disable(bp->dev);
- netif_carrier_off(bp->dev);
}
void bnxt_tx_enable(struct bnxt *bp)
@@ -9334,13 +9463,60 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
static int bnxt_fw_init_one(struct bnxt *bp);
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
+{
+#ifdef CONFIG_TEE_BNXT_FW
+ int rc = tee_bnxt_fw_load();
+
+ if (rc)
+ netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
+
+ return rc;
+#else
+ netdev_err(bp->dev, "OP-TEE not supported\n");
+ return -ENODEV;
+#endif
+}
+
+static int bnxt_try_recover_fw(struct bnxt *bp)
+{
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ int retry = 0, rc;
+ u32 sts;
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ do {
+ rc = __bnxt_hwrm_ver_get(bp, true);
+ sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!sts || !BNXT_FW_IS_BOOTING(sts))
+ break;
+ retry++;
+ } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (!BNXT_FW_IS_HEALTHY(sts)) {
+ netdev_err(bp->dev,
+ "Firmware not responding, status: 0x%x\n",
+ sts);
+ rc = -ENODEV;
+ }
+ if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+ netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
+ return bnxt_fw_reset_via_optee(bp);
+ }
+ return rc;
+ }
+
+ return -ENODEV;
+}
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_drv_if_change_input req = {0};
bool resc_reinit = false, fw_reset = false;
+ int rc, retry = 0;
u32 flags = 0;
- int rc;
if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
return 0;
@@ -9349,10 +9525,25 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (up)
req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
mutex_lock(&bp->hwrm_cmd_lock);
- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ while (retry < BNXT_FW_IF_RETRY) {
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (rc != -EAGAIN)
+ break;
+
+ msleep(50);
+ retry++;
+ }
if (!rc)
flags = le32_to_cpu(resp->flags);
mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc == -EAGAIN)
+ return rc;
+ if (rc && up) {
+ rc = bnxt_try_recover_fw(bp);
+ fw_reset = true;
+ }
if (rc)
return rc;
@@ -9692,6 +9883,25 @@ static void bnxt_preset_reg_win(struct bnxt *bp)
static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
+static int bnxt_reinit_after_abort(struct bnxt *bp)
+{
+ int rc;
+
+ if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return -EBUSY;
+
+ rc = bnxt_fw_init_one(bp);
+ if (!rc) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (!rc) {
+ clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
+ set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
+ }
+ }
+ return rc;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -9850,8 +10060,14 @@ static int bnxt_open(struct net_device *dev)
int rc;
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
- netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
- return -ENODEV;
+ rc = bnxt_reinit_after_abort(bp);
+ if (rc) {
+ if (rc == -EBUSY)
+ netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
+ else
+ netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
+ return -ENODEV;
+ }
}
rc = bnxt_hwrm_if_change(bp, true);
@@ -10788,11 +11004,23 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
- /* When firmware is fatal state, disable PCI device to prevent
- * any potential bad DMAs before freeing kernel memory.
+ /* When firmware is in fatal state, quiesce device and disable
+ * bus master to prevent any potential bad DMAs before freeing
+ * kernel memory.
*/
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ u16 val = 0;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+ if (val == 0xffff)
+ bp->fw_reset_min_dsecs = 0;
+ bnxt_tx_disable(bp);
+ bnxt_disable_napi(bp);
+ bnxt_disable_int_sync(bp);
+ bnxt_free_irq(bp);
+ bnxt_clear_int_mode(bp);
pci_disable_device(bp->pdev);
+ }
__bnxt_close_nic(bp, true, false);
bnxt_clear_int_mode(bp);
bnxt_hwrm_func_drv_unrgtr(bp);
@@ -10997,6 +11225,17 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
+static void bnxt_fw_echo_reply(struct bnxt *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_func_echo_response_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
+ req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
+ req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
+ hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -11064,6 +11303,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
bnxt_chk_missed_irq(bp);
+ if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
+ bnxt_fw_echo_reply(bp);
+
/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
* must be the last functions to be called before exiting.
*/
@@ -11180,21 +11422,6 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
-static int bnxt_fw_reset_via_optee(struct bnxt *bp)
-{
-#ifdef CONFIG_TEE_BNXT_FW
- int rc = tee_bnxt_fw_load();
-
- if (rc)
- netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
-
- return rc;
-#else
- netdev_err(bp->dev, "OP-TEE not supported\n");
- return -ENODEV;
-#endif
-}
-
static int bnxt_fw_init_one_p1(struct bnxt *bp)
{
int rc;
@@ -11203,19 +11430,10 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
rc = bnxt_hwrm_ver_get(bp);
bnxt_try_map_fw_health_reg(bp);
if (rc) {
- if (bp->fw_health && bp->fw_health->status_reliable) {
- u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
-
- netdev_err(bp->dev,
- "Firmware not responding, status: 0x%x\n",
- sts);
- if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
- netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
- rc = bnxt_fw_reset_via_optee(bp);
- if (!rc)
- rc = bnxt_hwrm_ver_get(bp);
- }
- }
+ rc = bnxt_try_recover_fw(bp);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_ver_get(bp);
if (rc)
return rc;
}
@@ -11415,6 +11633,12 @@ static void bnxt_reset_all(struct bnxt *bp)
bp->fw_reset_timestamp = jiffies;
}
+static bool bnxt_fw_reset_timeout(struct bnxt *bp)
+{
+ return time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10));
+}
+
static void bnxt_fw_reset_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
@@ -11436,8 +11660,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_timestamp));
goto fw_reset_abort;
} else if (n > 0) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
@@ -11466,8 +11689,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
- !time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ !bnxt_fw_reset_timeout(bp)) {
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
}
@@ -11491,6 +11713,20 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
u32 val;
+ if (!bp->fw_reset_min_dsecs) {
+ u16 val;
+
+ pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
+ &val);
+ if (val == 0xffff) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 1000);
+ return;
+ }
+ }
val = bnxt_fw_health_readl(bp,
BNXT_FW_RESET_INPROG_REG);
if (val)
@@ -11509,8 +11745,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
rc = __bnxt_hwrm_ver_get(bp, true);
if (rc) {
- if (time_after(jiffies, bp->fw_reset_timestamp +
- (bp->fw_reset_max_dsecs * HZ / 10))) {
+ if (bnxt_fw_reset_timeout(bp)) {
netdev_err(bp->dev, "Firmware reset aborted\n");
goto fw_reset_abort_status;
}
@@ -12091,8 +12326,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = bnxt_xdp,
.ndo_xdp_xmit = bnxt_xdp_xmit,
.ndo_bridge_getlink = bnxt_bridge_getlink,
@@ -12544,9 +12777,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnxt_ethtool_ops;
pci_set_drvdata(pdev, dev);
- if (BNXT_PF(bp))
- bnxt_vpd_read_info(bp);
-
rc = bnxt_alloc_hwrm_resources(bp);
if (rc)
goto init_err_pci_clean;
@@ -12558,6 +12788,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ if (BNXT_PF(bp))
+ bnxt_vpd_read_info(bp);
+
if (BNXT_CHIP_P5(bp)) {
bp->flags |= BNXT_FLAG_CHIP_P5;
if (BNXT_CHIP_SR2(bp))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 51996c85547e..1259e68cba2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,7 +18,7 @@
*/
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 10
-#define DRV_VER_UPD 1
+#define DRV_VER_UPD 2
#include <linux/ethtool.h>
#include <linux/interrupt.h>
@@ -656,6 +656,7 @@ struct nqe_cn {
#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define DFLT_HWRM_CMD_TIMEOUT 500
+#define HWRM_CMD_MAX_TIMEOUT 40000
#define SHORT_HWRM_CMD_TIMEOUT 20
#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
@@ -713,6 +714,13 @@ struct bnxt_sw_rx_agg_bd {
dma_addr_t mapping;
};
+struct bnxt_mem_init {
+ u8 init_val;
+ u16 offset;
+#define BNXT_MEM_INVALID_OFFSET 0xffff
+ u16 size;
+};
+
struct bnxt_ring_mem_info {
int nr_pages;
int page_size;
@@ -722,7 +730,7 @@ struct bnxt_ring_mem_info {
#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
u16 depth;
- u8 init_val;
+ struct bnxt_mem_init *mem_init;
void **pg_arr;
dma_addr_t *dma_arr;
@@ -1345,9 +1353,14 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
+#define BNXT_GRC_REG_STATUS_P5 0x520
+
#define BNXT_GRCPF_REG_KONG_COMM 0xA00
#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+#define BNXT_GRC_REG_CHIP_NUM 0x48
+#define BNXT_GRC_REG_BASE 0x260000
+
#define BNXT_GRC_BASE_MASK 0xfffff000
#define BNXT_GRC_OFFSET_MASK 0x00000ffc
@@ -1441,6 +1454,8 @@ struct bnxt_ctx_pg_info {
#define BNXT_MAX_TQM_RINGS \
(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1466,7 +1481,6 @@ struct bnxt_ctx_mem_info {
u32 tim_max_entries;
u16 mrav_num_entries_units;
u8 tqm_entries_multiple;
- u8 ctx_kind_initializer;
u8 tqm_fp_rings_count;
u32 flags;
@@ -1480,6 +1494,15 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
+
+#define BNXT_CTX_MEM_INIT_QP 0
+#define BNXT_CTX_MEM_INIT_SRQ 1
+#define BNXT_CTX_MEM_INIT_CQ 2
+#define BNXT_CTX_MEM_INIT_VNIC 3
+#define BNXT_CTX_MEM_INIT_STAT 4
+#define BNXT_CTX_MEM_INIT_MRAV 5
+#define BNXT_CTX_MEM_INIT_MAX 6
+ struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
};
struct bnxt_fw_health {
@@ -1508,6 +1531,8 @@ struct bnxt_fw_health {
u32 fw_reset_seq_regs[16];
u32 fw_reset_seq_vals[16];
u32 fw_reset_seq_delay_msec[16];
+ u32 echo_req_data1;
+ u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
struct devlink_health_reporter *fw_reset_reporter;
struct devlink_health_reporter *fw_fatal_reporter;
@@ -1532,9 +1557,22 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
((reg) & BNXT_GRC_OFFSET_MASK))
+#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_BOOTING(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_RETRY 5
+#define BNXT_FW_IF_RETRY 10
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1788,6 +1826,7 @@ struct bnxt {
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
+#define BNXT_STATE_NAPI_DISABLED 9
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1903,6 +1942,7 @@ struct bnxt {
#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
#define BNXT_FW_EXCEPTION_SP_EVENT 19
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
+#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
struct delayed_work fw_reset_task;
int fw_reset_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 6b7b69ed62db..64381be935a8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -44,21 +44,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val, health_status;
+ u32 val;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
return 0;
val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
- health_status = val & 0xffff;
- if (health_status < BNXT_FW_STATUS_HEALTHY) {
+ if (BNXT_FW_IS_BOOTING(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Not yet completed initialization");
if (rc)
return rc;
- } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
+ } else if (BNXT_FW_IS_ERR(val)) {
rc = devlink_fmsg_string_pair_put(fmsg, "Description",
"Encountered fatal error and cannot recover");
if (rc)
@@ -472,8 +471,8 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
u32 ver = nvm_cfg_ver.vu32;
- sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
- ver & 0xF);
+ sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xf, (ver >> 8) & 0xf,
+ ver & 0xf);
rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
buf);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 2d3e962bdac3..6199f125bc13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2020 Broadcom Inc.
+ * Copyright (c) 2018-2021 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -103,6 +103,7 @@ struct hwrm_short_input {
struct cmd_nums {
__le16 req_type;
#define HWRM_VER_GET 0x0UL
+ #define HWRM_FUNC_ECHO_RESPONSE 0xbUL
#define HWRM_ERROR_RECOVERY_QCFG 0xcUL
#define HWRM_FUNC_DRV_IF_CHANGE 0xdUL
#define HWRM_FUNC_BUF_UNRGTR 0xeUL
@@ -164,6 +165,7 @@ struct cmd_nums {
#define HWRM_VNIC_PLCMODES_CFG 0x48UL
#define HWRM_VNIC_PLCMODES_QCFG 0x49UL
#define HWRM_VNIC_QCAPS 0x4aUL
+ #define HWRM_VNIC_UPDATE 0x4bUL
#define HWRM_RING_ALLOC 0x50UL
#define HWRM_RING_FREE 0x51UL
#define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
@@ -184,6 +186,9 @@ struct cmd_nums {
#define HWRM_QUEUE_MPLS_QCAPS 0x80UL
#define HWRM_QUEUE_MPLSTC2PRI_QCFG 0x81UL
#define HWRM_QUEUE_MPLSTC2PRI_CFG 0x82UL
+ #define HWRM_QUEUE_VLANPRI_QCAPS 0x83UL
+ #define HWRM_QUEUE_VLANPRI2PRI_QCFG 0x84UL
+ #define HWRM_QUEUE_VLANPRI2PRI_CFG 0x85UL
#define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
#define HWRM_CFA_L2_FILTER_FREE 0x91UL
#define HWRM_CFA_L2_FILTER_CFG 0x92UL
@@ -217,6 +222,8 @@ struct cmd_nums {
#define HWRM_PORT_TX_FIR_CFG 0xbbUL
#define HWRM_PORT_TX_FIR_QCFG 0xbcUL
#define HWRM_PORT_ECN_QSTATS 0xbdUL
+ #define HWRM_FW_LIVEPATCH_QUERY 0xbeUL
+ #define HWRM_FW_LIVEPATCH 0xbfUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -347,6 +354,8 @@ struct cmd_nums {
#define HWRM_FUNC_HOST_PF_IDS_QUERY 0x197UL
#define HWRM_FUNC_QSTATS_EXT 0x198UL
#define HWRM_STAT_EXT_CTX_QUERY 0x199UL
+ #define HWRM_FUNC_SPD_CFG 0x19aUL
+ #define HWRM_FUNC_SPD_QCFG 0x19bUL
#define HWRM_SELFTEST_QLIST 0x200UL
#define HWRM_SELFTEST_EXEC 0x201UL
#define HWRM_SELFTEST_IRQ 0x202UL
@@ -359,6 +368,11 @@ struct cmd_nums {
#define HWRM_MFG_HDMA_TEST 0x209UL
#define HWRM_MFG_FRU_EEPROM_WRITE 0x20aUL
#define HWRM_MFG_FRU_EEPROM_READ 0x20bUL
+ #define HWRM_MFG_SOC_IMAGE 0x20cUL
+ #define HWRM_MFG_SOC_QSTATUS 0x20dUL
+ #define HWRM_MFG_PARAM_SEEPROM_SYNC 0x20eUL
+ #define HWRM_MFG_PARAM_SEEPROM_READ 0x20fUL
+ #define HWRM_MFG_PARAM_SEEPROM_HEALTH 0x210UL
#define HWRM_TF 0x2bcUL
#define HWRM_TF_VERSION_GET 0x2bdUL
#define HWRM_TF_SESSION_OPEN 0x2c6UL
@@ -384,6 +398,7 @@ struct cmd_nums {
#define HWRM_TF_EXT_EM_QCFG 0x2e9UL
#define HWRM_TF_EM_INSERT 0x2eaUL
#define HWRM_TF_EM_DELETE 0x2ebUL
+ #define HWRM_TF_EM_HASH_INSERT 0x2ecUL
#define HWRM_TF_TCAM_SET 0x2f8UL
#define HWRM_TF_TCAM_GET 0x2f9UL
#define HWRM_TF_TCAM_MOVE 0x2faUL
@@ -486,9 +501,9 @@ struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 68
-#define HWRM_VERSION_STR "1.10.1.68"
+#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_RSVD 16
+#define HWRM_VERSION_STR "1.10.2.16"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -563,8 +578,9 @@ struct hwrm_ver_get_output {
__le16 max_resp_len;
__le16 def_req_timeout;
u8 flags;
- #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
- #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
+ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
+ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY_BACKING_STORE 0x4UL
u8 unused_0[2];
u8 always_1;
__le16 hwrm_intf_major;
@@ -708,6 +724,8 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE 0x3fUL
#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE 0x40UL
#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE 0x41UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x43UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -815,6 +833,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY
__le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_SFT 0
u8 opaque_v;
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL
@@ -832,7 +852,8 @@ struct hwrm_async_event_cmpl_reset_notify {
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8)
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8)
- #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET (0x4UL << 8)
+ #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FAST_RESET
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL
#define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16
};
@@ -1033,6 +1054,26 @@ struct hwrm_async_event_cmpl_deferred_response {
__le32 event_data1;
};
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1271,6 +1312,14 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_PROXY_SRC_INTF_OVERRIDE_SUPPORT 0x20UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_EVB_MODE_CFG_NOT_SUPPORTED 0x100UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED 0x200UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED 0x400UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE 0x800UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE 0x1000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED 0x2000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED 0x4000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED 0x8000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1315,6 +1364,8 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
+ #define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
+ #define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1450,6 +1501,8 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS 0x4000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE 0x8000000UL
#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE 0x10000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE 0x20000000UL
+ #define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE 0x40000000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
@@ -1731,6 +1784,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL
#define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_FAST_RESET_SUPPORT 0x80UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -1993,7 +2047,7 @@ struct hwrm_func_backing_store_qcaps_input {
__le64 resp_addr;
};
-/* hwrm_func_backing_store_qcaps_output (size:640b/80B) */
+/* hwrm_func_backing_store_qcaps_output (size:704b/88B) */
struct hwrm_func_backing_store_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -2024,13 +2078,49 @@ struct hwrm_func_backing_store_qcaps_output {
__le16 mrav_num_entries_units;
u8 tqm_entries_multiple;
u8 ctx_kind_initializer;
- __le32 rsvd;
- __le16 rsvd1;
+ __le16 ctx_init_mask;
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_QP 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_CQ 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_STAT 0x10UL
+ #define FUNC_BACKING_STORE_QCAPS_RESP_CTX_INIT_MASK_MRAV 0x20UL
+ u8 qp_init_offset;
+ u8 srq_init_offset;
+ u8 cq_init_offset;
+ u8 vnic_init_offset;
u8 tqm_fp_rings_count;
- u8 valid;
+ u8 stat_init_offset;
+ u8 mrav_init_offset;
+ u8 tqm_fp_rings_count_ext;
+ u8 rsvd[5];
+ u8 valid;
+};
+
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+ u8 tqm_ring_pg_size_tqm_ring_lvl;
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK 0xfUL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT 0
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0 0x0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1 0x1UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2 0x2UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT 4
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+ u8 unused[3];
+ __le32 tqm_ring_num_entries;
+ __le64 tqm_ring_page_dir;
};
-/* hwrm_func_backing_store_cfg_input (size:2048b/256B) */
+/* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
struct hwrm_func_backing_store_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2041,22 +2131,25 @@ struct hwrm_func_backing_store_cfg_input {
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE 0x1UL
#define FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT 0x2UL
__le32 enables;
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
- #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ 0x4UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC 0x8UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT 0x10UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP 0x20UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING0 0x40UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING1 0x80UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING2 0x100UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING3 0x200UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING4 0x400UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING5 0x800UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING6 0x1000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING7 0x2000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV 0x4000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM 0x8000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING8 0x10000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING9 0x20000UL
+ #define FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_RING10 0x40000UL
u8 qpc_pg_size_qpc_lvl;
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_MASK 0xfUL
#define FUNC_BACKING_STORE_CFG_REQ_QPC_LVL_SFT 0
@@ -2358,6 +2451,63 @@ struct hwrm_func_backing_store_cfg_input {
__le16 tqm_entry_size;
__le16 mrav_entry_size;
__le16 tim_entry_size;
+ u8 tqm_ring8_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING8_TQM_RING_PG_SIZE_PG_1G
+ u8 ring8_unused[3];
+ __le32 tqm_ring8_num_entries;
+ __le64 tqm_ring8_page_dir;
+ u8 tqm_ring9_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING9_TQM_RING_PG_SIZE_PG_1G
+ u8 ring9_unused[3];
+ __le32 tqm_ring9_num_entries;
+ __le64 tqm_ring9_page_dir;
+ u8 tqm_ring10_pg_size_tqm_ring_lvl;
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_MASK 0xfUL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_SFT 0
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_0 0x0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_1 0x1UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2 0x2UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_LVL_LVL_2
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_MASK 0xf0UL
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_SFT 4
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_4K (0x0UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8K (0x1UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_64K (0x2UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_2M (0x3UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_8M (0x4UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G (0x5UL << 4)
+ #define FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_LAST FUNC_BACKING_STORE_CFG_REQ_RING10_TQM_RING_PG_SIZE_PG_1G
+ u8 ring10_unused[3];
+ __le32 tqm_ring10_num_entries;
+ __le64 tqm_ring10_page_dir;
};
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -2463,6 +2613,27 @@ struct hwrm_error_recovery_qcfg_output {
u8 valid;
};
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 event_data1;
+ __le32 event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
@@ -2930,6 +3101,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le16 support_pam4_speeds;
@@ -3528,8 +3700,8 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
#define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xc0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 6
+ #define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN 0x40UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS 0x80UL
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -4119,7 +4291,10 @@ struct hwrm_queue_qportcfg_output {
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS_NIC 0x3UL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
#define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
- u8 unused_0;
+ u8 queue_id0_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_TYPE_CNP 0x4UL
char qid0_name[16];
char qid1_name[16];
char qid2_name[16];
@@ -4128,7 +4303,34 @@ struct hwrm_queue_qportcfg_output {
char qid5_name[16];
char qid6_name[16];
char qid7_name[16];
- u8 unused_1[7];
+ u8 queue_id1_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id2_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id3_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id4_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id5_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id6_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_TYPE_CNP 0x4UL
+ u8 queue_id7_service_profile_type;
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_ROCE 0x1UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_NIC 0x2UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_TYPE_CNP 0x4UL
u8 valid;
};
@@ -5142,8 +5344,10 @@ struct hwrm_vnic_alloc_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
- u8 unused_0[4];
+ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+ #define VNIC_ALLOC_REQ_FLAGS_VIRTIO_NET_FID_VALID 0x2UL
+ __le16 virtio_net_fid;
+ u8 unused_0[2];
};
/* hwrm_vnic_alloc_output (size:128b/16B) */
@@ -5260,6 +5464,9 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_OUTERMOST_RSS_CAP 0x80UL
#define VNIC_QCAPS_RESP_FLAGS_COS_ASSIGNMENT_CAP 0x100UL
#define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP 0x200UL
+ #define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP 0x400UL
+ #define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
+ #define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -5585,11 +5792,15 @@ struct hwrm_ring_alloc_output {
__le16 resp_len;
__le16 ring_id;
__le16 logical_ring_id;
- u8 unused_0[3];
+ u8 push_buffer_index;
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_ALLOC_RESP_PUSH_BUFFER_INDEX_LAST RING_ALLOC_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[2];
u8 valid;
};
-/* hwrm_ring_free_input (size:192b/24B) */
+/* hwrm_ring_free_input (size:256b/32B) */
struct hwrm_ring_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -5604,9 +5815,13 @@ struct hwrm_ring_free_input {
#define RING_FREE_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_FREE_REQ_RING_TYPE_NQ 0x5UL
#define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 flags;
+ #define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+ #define RING_FREE_REQ_FLAGS_LAST RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
__le16 ring_id;
- u8 unused_1[4];
+ __le32 prod_idx;
+ __le32 opaque;
+ __le32 unused_1;
};
/* hwrm_ring_free_output (size:128b/16B) */
@@ -5644,7 +5859,11 @@ struct hwrm_ring_reset_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
- u8 unused_0[4];
+ u8 push_buffer_index;
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PING_BUFFER 0x0UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER 0x1UL
+ #define RING_RESET_RESP_PUSH_BUFFER_INDEX_LAST RING_RESET_RESP_PUSH_BUFFER_INDEX_PONG_BUFFER
+ u8 unused_0[3];
u8 consumer_idx[3];
u8 valid;
};
@@ -6988,21 +7207,23 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
__le16 seq_id;
__le16 resp_len;
__le32 flags;
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
- #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_16BIT_SUPPORTED 0x1UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_HND_64BIT_SUPPORTED 0x2UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_BATCH_DELETE_SUPPORTED 0x4UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_RESET_ALL_SUPPORTED 0x8UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_DEST_FUNC_SUPPORTED 0x10UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TX_EEM_FLOW_SUPPORTED 0x20UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RX_EEM_FLOW_SUPPORTED 0x40UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_FLOW_COUNTER_ALLOC_SUPPORTED 0x80UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_SUPPORTED 0x100UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_UNTAGGED_VLAN_SUPPORTED 0x200UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_XDP_SUPPORTED 0x400UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_HEADER_SOURCE_FIELDS_SUPPORTED 0x800UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ARP_SUPPORTED 0x1000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED 0x2000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_RX_ETHERTYPE_IP_SUPPORTED 0x4000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
u8 unused_0[3];
u8 valid;
};
@@ -7420,7 +7641,13 @@ struct hwrm_fw_qstatus_output {
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER 0x3UL
#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
- u8 unused_0[6];
+ u8 nvm_option_action_status;
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE 0x0UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+ #define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+ u8 unused_0[5];
u8 valid;
};
@@ -7472,7 +7699,8 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
- #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_RSS_V2
+ #define STRUCT_HDR_STRUCT_ID_MSIX_PER_VF 0xc8UL
+ #define STRUCT_HDR_STRUCT_ID_LAST STRUCT_HDR_STRUCT_ID_MSIX_PER_VF
__le16 len;
u8 version;
u8 count;
@@ -8000,6 +8228,9 @@ struct hwrm_dbg_coredump_initiate_output {
struct coredump_data_hdr {
__le32 address;
__le32 flags_length;
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_MASK 0xffffffUL
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_ACTUAL_LEN_SFT 0
+ #define COREDUMP_DATA_HDR_FLAGS_LENGTH_INDIRECT_ACCESS 0x1000000UL
__le32 instance;
__le32 next_offset;
};
@@ -8669,7 +8900,6 @@ struct hcomm_status {
#define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
#define HCOMM_STATUS_TRUE_OFFSET_SFT 2
};
-
#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
#endif /* _BNXT_HSI_H_ */
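
The new hwrm_func_echo_response_input structure above is what lets the driver answer the ECHO_REQUEST async event: the event_data1/event_data2 words from the completion are echoed back to firmware. A minimal hedged sketch that only touches the structure defined in this hunk is shown below; the helper name is illustrative, and actually queuing the message is left to the driver's normal HWRM send path.

/* Sketch (kernel context assumed): build the echo response payload from
 * the data words carried in the hwrm_async_event_cmpl_echo_request event.
 */
static void example_fill_echo_response(struct hwrm_func_echo_response_input *req,
                                       u32 event_data1, u32 event_data2)
{
        req->event_data1 = cpu_to_le32(event_data1);
        req->event_data2 = cpu_to_le32(event_data2);
}
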
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index fcc262064766..641303894341 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -133,12 +133,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
txr = rxr->bnapi->tx_ring;
- xdp.data_hard_start = *data_ptr - offset;
- xdp.data = *data_ptr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = *data_ptr + *len;
- xdp.rxq = &rxr->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
+ xdp_init_buff(&xdp, PAGE_SIZE, &rxr->xdp_rxq);
+ xdp_prepare_buff(&xdp, *data_ptr - offset, offset, *len, false);
orig_data = xdp.data;
rcu_read_lock();
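
This hunk, like the matching nicvf_main.c conversion later in the diff, replaces open-coded xdp_buff field assignments with the xdp_init_buff()/xdp_prepare_buff() helpers from include/net/xdp.h. A minimal sketch of the pattern follows; the function and parameter names (rxq, frame_sz, hard_start, headroom, len) are illustrative, only the two helpers are real kernel API.

#include <net/xdp.h>

/* Sketch: per-ring constants go through xdp_init_buff(), per-packet
 * layout through xdp_prepare_buff().  The final argument says whether
 * the metadata area is valid; both drivers above pass false.
 */
static void example_build_xdp(struct xdp_buff *xdp, struct xdp_rxq_info *rxq,
                              u32 frame_sz, unsigned char *hard_start,
                              int headroom, int len)
{
        xdp_init_buff(xdp, frame_sz, rxq);
        xdp_prepare_buff(xdp, hard_start, headroom, len, false);
}
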
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index f6ca01da141d..0a6d91b0f0aa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -16,6 +16,8 @@
#include <linux/dim.h>
#include <linux/ethtool.h>
+#include "../unimac.h"
+
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESC 256
@@ -150,63 +152,6 @@ struct bcmgenet_mib_counters {
u32 tx_realloc_tsb_failed;
};
-#define UMAC_HD_BKP_CTRL 0x004
-#define HD_FC_EN (1 << 0)
-#define HD_FC_BKOFF_OK (1 << 1)
-#define IPG_CONFIG_RX_SHIFT 2
-#define IPG_CONFIG_RX_MASK 0x1F
-
-#define UMAC_CMD 0x008
-#define CMD_TX_EN (1 << 0)
-#define CMD_RX_EN (1 << 1)
-#define UMAC_SPEED_10 0
-#define UMAC_SPEED_100 1
-#define UMAC_SPEED_1000 2
-#define UMAC_SPEED_2500 3
-#define CMD_SPEED_SHIFT 2
-#define CMD_SPEED_MASK 3
-#define CMD_PROMISC (1 << 4)
-#define CMD_PAD_EN (1 << 5)
-#define CMD_CRC_FWD (1 << 6)
-#define CMD_PAUSE_FWD (1 << 7)
-#define CMD_RX_PAUSE_IGNORE (1 << 8)
-#define CMD_TX_ADDR_INS (1 << 9)
-#define CMD_HD_EN (1 << 10)
-#define CMD_SW_RESET (1 << 13)
-#define CMD_LCL_LOOP_EN (1 << 15)
-#define CMD_AUTO_CONFIG (1 << 22)
-#define CMD_CNTL_FRM_EN (1 << 23)
-#define CMD_NO_LEN_CHK (1 << 24)
-#define CMD_RMT_LOOP_EN (1 << 25)
-#define CMD_PRBL_EN (1 << 27)
-#define CMD_TX_PAUSE_IGNORE (1 << 28)
-#define CMD_TX_RX_EN (1 << 29)
-#define CMD_RUNT_FILTER_DIS (1 << 30)
-
-#define UMAC_MAC0 0x00C
-#define UMAC_MAC1 0x010
-#define UMAC_MAX_FRAME_LEN 0x014
-
-#define UMAC_MODE 0x44
-#define MODE_LINK_STATUS (1 << 5)
-
-#define UMAC_EEE_CTRL 0x064
-#define EN_LPI_RX_PAUSE (1 << 0)
-#define EN_LPI_TX_PFC (1 << 1)
-#define EN_LPI_TX_PAUSE (1 << 2)
-#define EEE_EN (1 << 3)
-#define RX_FIFO_CHECK (1 << 4)
-#define EEE_TX_CLK_DIS (1 << 5)
-#define DIS_EEE_10M (1 << 6)
-#define LP_IDLE_PREDICTION_MODE (1 << 7)
-
-#define UMAC_EEE_LPI_TIMER 0x068
-#define UMAC_EEE_WAKE_TIMER 0x06C
-#define UMAC_EEE_REF_COUNT 0x070
-#define EEE_REFERENCE_COUNT_MASK 0xffff
-
-#define UMAC_TX_FLUSH 0x334
-
#define UMAC_MIB_START 0x400
#define UMAC_MDIO_CMD 0x614
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6fb6c3556285..5335244e4577 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -63,11 +63,11 @@ void bcmgenet_mii_setup(struct net_device *dev)
/* speed */
if (phydev->speed == SPEED_1000)
- cmd_bits = UMAC_SPEED_1000;
+ cmd_bits = CMD_SPEED_1000;
else if (phydev->speed == SPEED_100)
- cmd_bits = UMAC_SPEED_100;
+ cmd_bits = CMD_SPEED_100;
else
- cmd_bits = UMAC_SPEED_10;
+ cmd_bits = CMD_SPEED_10;
cmd_bits <<= CMD_SPEED_SHIFT;
/* duplex */
@@ -359,7 +359,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
* those versions of GENET.
*/
if (priv->internal_phy && !GENET_IS_V5(priv))
- dev->phydev->irq = PHY_IGNORE_INTERRUPT;
+ dev->phydev->irq = PHY_MAC_INTERRUPT;
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5143cdd0eeca..d2381929931b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -1580,12 +1580,6 @@ static int tg3_mdio_init(struct tg3 *tp)
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
- if (tg3_flag(tp, RGMII_INBAND_DISABLE))
- phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
- if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
- phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
fallthrough;
case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
@@ -12826,11 +12820,13 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
offset = tg3_nvram_logical_addr(tp, offset);
}
- }
- if (!offset || !len) {
- offset = TG3_NVM_VPD_OFF;
- len = TG3_NVM_VPD_LEN;
+ if (!offset || !len) {
+ offset = TG3_NVM_VPD_OFF;
+ len = TG3_NVM_VPD_LEN;
+ }
+ } else {
+ len = TG3_NVM_PCI_VPD_MAX_LEN;
}
buf = kmalloc(len, GFP_KERNEL);
@@ -12846,26 +12842,16 @@ static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
goto error;
}
+ *vpdlen = len;
} else {
- u8 *ptr;
ssize_t cnt;
- unsigned int pos = 0;
-
- ptr = (u8 *)&buf[0];
- for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
- cnt = pci_read_vpd(tp->pdev, pos,
- len - pos, ptr);
- if (cnt == -ETIMEDOUT || cnt == -EINTR)
- cnt = 0;
- else if (cnt < 0)
- goto error;
- }
- if (pos != len)
+
+ cnt = pci_read_vpd(tp->pdev, 0, len, (u8 *)buf);
+ if (cnt < 0)
goto error;
+ *vpdlen = cnt;
}
- *vpdlen = len;
-
return buf;
error:
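
The tg3 hunk above collapses a chunked VPD read loop into a single pci_read_vpd() call and records the byte count actually returned rather than the requested length. A hedged sketch of that usage, with an illustrative helper name, is:

#include <linux/pci.h>
#include <linux/slab.h>

/* Sketch: read up to @max_len bytes of VPD in one call.  pci_read_vpd()
 * returns the number of bytes read or a negative errno, which is what the
 * driver now stores in *vpdlen.
 */
static void *example_vpd_read(struct pci_dev *pdev, size_t max_len,
                              u32 *vpdlen)
{
        u8 *buf = kmalloc(max_len, GFP_KERNEL);
        ssize_t cnt;

        if (!buf)
                return NULL;

        cnt = pci_read_vpd(pdev, 0, max_len, buf);
        if (cnt < 0) {
                kfree(buf);
                return NULL;
        }

        *vpdlen = cnt;
        return buf;
}
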
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1000c894064f..46ec4fdfd16a 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2101,6 +2101,7 @@
/* Hardware Legacy NVRAM layout */
#define TG3_NVM_VPD_OFF 0x100
#define TG3_NVM_VPD_LEN 256
+#define TG3_NVM_PCI_VPD_MAX_LEN 512
/* Hardware Selfboot NVRAM layout */
#define TG3_NVM_HWSB_CFG1 0x00000004
diff --git a/drivers/net/ethernet/broadcom/unimac.h b/drivers/net/ethernet/broadcom/unimac.h
new file mode 100644
index 000000000000..585a85286257
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/unimac.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __UNIMAC_H
+#define __UNIMAC_H
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET_OLD (1 << 11)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_RX_ERR_DISC (1 << 26)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_PAUSE_QUANTA 0x018
+#define UMAC_MODE 0x044
+#define MODE_LINK_STATUS (1 << 5)
+#define UMAC_FRM_TAG0 0x048 /* outer tag */
+#define UMAC_FRM_TAG1 0x04c /* inner tag */
+#define UMAC_TX_IPG_LEN 0x05c
+#define UMAC_EEE_CTRL 0x064
+#define EN_LPI_RX_PAUSE (1 << 0)
+#define EN_LPI_TX_PFC (1 << 1)
+#define EN_LPI_TX_PAUSE (1 << 2)
+#define EEE_EN (1 << 3)
+#define RX_FIFO_CHECK (1 << 4)
+#define EEE_TX_CLK_DIS (1 << 5)
+#define DIS_EEE_10M (1 << 6)
+#define LP_IDLE_PREDICTION_MODE (1 << 7)
+#define UMAC_EEE_LPI_TIMER 0x068
+#define UMAC_EEE_WAKE_TIMER 0x06C
+#define UMAC_EEE_REF_COUNT 0x070
+#define EEE_REFERENCE_COUNT_MASK 0xffff
+#define UMAC_RX_IPG_INV 0x078
+#define UMAC_MACSEC_PROG_TX_CRC 0x310
+#define UMAC_MACSEC_CTRL 0x314
+#define UMAC_PAUSE_CTRL 0x330
+#define UMAC_TX_FLUSH 0x334
+#define UMAC_RX_FIFO_STATUS 0x338
+#define UMAC_TX_FIFO_STATUS 0x33c
+
+#endif
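
With the UniMAC register layout now shared in unimac.h, users such as bcmgenet (see the bcmmii.c hunk above) select the link-speed field with the CMD_SPEED_* values. A minimal sketch of that selection, with an illustrative helper name and plain integer speeds, might look like:

static inline u32 example_umac_speed_bits(int speed)
{
        u32 bits;

        if (speed == 1000)
                bits = CMD_SPEED_1000;
        else if (speed == 100)
                bits = CMD_SPEED_100;
        else
                bits = CMD_SPEED_10;

        return bits << CMD_SPEED_SHIFT; /* field lives in UMAC_CMD */
}
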
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 814a5b10141d..472bf8f220bc 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -470,6 +470,10 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
return;
+ /* In case of MII the PHY is the clock master */
+ if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
+ return;
+
switch (speed) {
case SPEED_10:
rate = 2500000;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 37d064193f0f..2a0d64e5797c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1163,7 +1163,7 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
oct->flags |= LIO_FLAG_MSI_ENABLED;
/* allocate storage for the names assigned to the irq */
- oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+ oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
if (!oct->irq_name_storage)
return -ENOMEM;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7d00d3a8ded4..7c5af4beedc6 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -3219,8 +3219,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_set_vf_mac = liquidio_set_vf_mac,
.ndo_set_vf_vlan = liquidio_set_vf_vlan,
.ndo_get_vf_config = liquidio_get_vf_config,
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 103440f97bc8..516f166ceff8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1879,8 +1879,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index 387a57cbfb73..e159194d0aef 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -545,7 +545,7 @@ static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];
static u32 octeon_device_count;
/* locks device array (i.e. octeon_device[]) */
-static spinlock_t octeon_devices_lock;
+static DEFINE_SPINLOCK(octeon_devices_lock);
static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
@@ -563,7 +563,6 @@ void octeon_init_device_list(int conf_type)
memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
for (i = 0; i < MAX_OCTEON_DEVICES; i++)
oct_set_config_info(i, conf_type);
- spin_lock_init(&octeon_devices_lock);
}
static void *__retrieve_octeon_config_info(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 5e50bb19bf26..ecffebd513be 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1556,18 +1556,7 @@ static struct platform_driver octeon_mgmt_driver = {
.remove = octeon_mgmt_remove,
};
-static int __init octeon_mgmt_mod_init(void)
-{
- return platform_driver_register(&octeon_mgmt_driver);
-}
-
-static void __exit octeon_mgmt_mod_exit(void)
-{
- platform_driver_unregister(&octeon_mgmt_driver);
-}
-
-module_init(octeon_mgmt_mod_init);
-module_exit(octeon_mgmt_mod_exit);
+module_platform_driver(octeon_mgmt_driver);
MODULE_SOFTDEP("pre: mdio-cavium");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f3b7b443f964..c33b4e837515 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -530,6 +530,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
struct rcv_queue *rq, struct sk_buff **skb)
{
+ unsigned char *hard_start, *data;
struct xdp_buff xdp;
struct page *page;
u32 action;
@@ -547,12 +548,11 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page);
- xdp.data = (void *)cpu_addr;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + len;
- xdp.rxq = &rq->xdp_rxq;
- xdp.frame_sz = RCV_FRAG_LEN + XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ &rq->xdp_rxq);
+ hard_start = page_address(page);
+ data = (unsigned char *)cpu_addr;
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
orig_data = xdp.data;
rcu_read_lock();
diff --git a/drivers/net/ethernet/chelsio/cxgb/common.h b/drivers/net/ethernet/chelsio/cxgb/common.h
index 6475060649e9..0321be77366c 100644
--- a/drivers/net/ethernet/chelsio/cxgb/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb/common.h
@@ -238,7 +238,6 @@ struct adapter {
int msg_enable;
u32 mmio_len;
- struct work_struct ext_intr_handler_task;
struct adapter_params params;
/* Terminator modules. */
@@ -257,6 +256,7 @@ struct adapter {
/* guards async operations */
spinlock_t async_lock ____cacheline_aligned;
+ u32 pending_thread_intr;
u32 slow_intr_mask;
int t1powersave;
};
@@ -334,8 +334,7 @@ void t1_interrupts_enable(adapter_t *adapter);
void t1_interrupts_disable(adapter_t *adapter);
void t1_interrupts_clear(adapter_t *adapter);
int t1_elmer0_ext_intr_handler(adapter_t *adapter);
-void t1_elmer0_ext_intr(adapter_t *adapter);
-int t1_slow_intr_handler(adapter_t *adapter);
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter);
int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct board_info *t1_get_board_info(unsigned int board_id);
@@ -347,7 +346,6 @@ int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
int t1_init_hw_modules(adapter_t *adapter);
int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
void t1_free_sw_modules(adapter_t *adapter);
-void t1_fatal_err(adapter_t *adapter);
void t1_link_changed(adapter_t *adapter, int port_id);
void t1_link_negotiated(adapter_t *adapter, int port_id, int link_stat,
int speed, int duplex, int pause);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 0e4a0f413960..512da98019c6 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -211,9 +211,10 @@ static int cxgb_up(struct adapter *adapter)
t1_interrupts_clear(adapter);
adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
- err = request_irq(adapter->pdev->irq, t1_interrupt,
- adapter->params.has_msi ? 0 : IRQF_SHARED,
- adapter->name, adapter);
+ err = request_threaded_irq(adapter->pdev->irq, t1_interrupt,
+ t1_interrupt_thread,
+ adapter->params.has_msi ? 0 : IRQF_SHARED,
+ adapter->name, adapter);
if (err) {
if (adapter->params.has_msi)
pci_disable_msi(adapter->pdev);
@@ -916,51 +917,6 @@ static void mac_stats_task(struct work_struct *work)
spin_unlock(&adapter->work_lock);
}
-/*
- * Processes elmer0 external interrupts in process context.
- */
-static void ext_intr_task(struct work_struct *work)
-{
- struct adapter *adapter =
- container_of(work, struct adapter, ext_intr_handler_task);
-
- t1_elmer0_ext_intr_handler(adapter);
-
- /* Now reenable external interrupts */
- spin_lock_irq(&adapter->async_lock);
- adapter->slow_intr_mask |= F_PL_INTR_EXT;
- writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
- writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
- spin_unlock_irq(&adapter->async_lock);
-}
-
-/*
- * Interrupt-context handler for elmer0 external interrupts.
- */
-void t1_elmer0_ext_intr(struct adapter *adapter)
-{
- /*
- * Schedule a task to handle external interrupts as we require
- * a process context. We disable EXT interrupts in the interim
- * and let the task reenable them when it's done.
- */
- adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
- writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
- adapter->regs + A_PL_ENABLE);
- schedule_work(&adapter->ext_intr_handler_task);
-}
-
-void t1_fatal_err(struct adapter *adapter)
-{
- if (adapter->flags & FULL_INIT_DONE) {
- t1_sge_stop(adapter->sge);
- t1_interrupts_disable(adapter);
- }
- pr_alert("%s: encountered fatal error, operation suspended\n",
- adapter->name);
-}
-
static const struct net_device_ops cxgb_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -1062,8 +1018,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->async_lock);
spin_lock_init(&adapter->mac_lock);
- INIT_WORK(&adapter->ext_intr_handler_task,
- ext_intr_task);
INIT_DELAYED_WORK(&adapter->stats_update_task,
mac_stats_task);
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 2d9c2b5a690a..cda01f22c71c 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -940,10 +940,11 @@ void t1_sge_intr_clear(struct sge *sge)
/*
* SGE 'Error' interrupt handler
*/
-int t1_sge_intr_error_handler(struct sge *sge)
+bool t1_sge_intr_error_handler(struct sge *sge)
{
struct adapter *adapter = sge->adapter;
u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+ bool wake = false;
if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
cause &= ~F_PACKET_TOO_BIG;
@@ -967,11 +968,14 @@ int t1_sge_intr_error_handler(struct sge *sge)
sge->stats.pkt_mismatch++;
pr_alert("%s: SGE packet mismatch\n", adapter->name);
}
- if (cause & SGE_INT_FATAL)
- t1_fatal_err(adapter);
+ if (cause & SGE_INT_FATAL) {
+ t1_interrupts_disable(adapter);
+ adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+ wake = true;
+ }
writel(cause, adapter->regs + A_SG_INT_CAUSE);
- return 0;
+ return wake;
}
const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
@@ -1619,11 +1623,46 @@ int t1_poll(struct napi_struct *napi, int budget)
return work_done;
}
+irqreturn_t t1_interrupt_thread(int irq, void *data)
+{
+ struct adapter *adapter = data;
+ u32 pending_thread_intr;
+
+ spin_lock_irq(&adapter->async_lock);
+ pending_thread_intr = adapter->pending_thread_intr;
+ adapter->pending_thread_intr = 0;
+ spin_unlock_irq(&adapter->async_lock);
+
+ if (!pending_thread_intr)
+ return IRQ_NONE;
+
+ if (pending_thread_intr & F_PL_INTR_EXT)
+ t1_elmer0_ext_intr_handler(adapter);
+
+ /* This error is fatal, interrupts remain off */
+ if (pending_thread_intr & F_PL_INTR_SGE_ERR) {
+ pr_alert("%s: encountered fatal error, operation suspended\n",
+ adapter->name);
+ t1_sge_stop(adapter->sge);
+ return IRQ_HANDLED;
+ }
+
+ spin_lock_irq(&adapter->async_lock);
+ adapter->slow_intr_mask |= F_PL_INTR_EXT;
+
+ writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+ writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+ adapter->regs + A_PL_ENABLE);
+ spin_unlock_irq(&adapter->async_lock);
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t t1_interrupt(int irq, void *data)
{
struct adapter *adapter = data;
struct sge *sge = adapter->sge;
- int handled;
+ irqreturn_t handled;
if (likely(responses_pending(adapter))) {
writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
@@ -1645,10 +1684,10 @@ irqreturn_t t1_interrupt(int irq, void *data)
handled = t1_slow_intr_handler(adapter);
spin_unlock(&adapter->async_lock);
- if (!handled)
+ if (handled == IRQ_NONE)
sge->stats.unhandled_irqs++;
- return IRQ_RETVAL(handled != 0);
+ return handled;
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index a1ba591b3431..716705b96f26 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -74,6 +74,7 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
int t1_sge_configure(struct sge *, struct sge_params *);
int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
void t1_sge_destroy(struct sge *);
+irqreturn_t t1_interrupt_thread(int irq, void *data);
irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
@@ -81,7 +82,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
-int t1_sge_intr_error_handler(struct sge *);
+bool t1_sge_intr_error_handler(struct sge *sge);
void t1_sge_intr_enable(struct sge *);
void t1_sge_intr_disable(struct sge *);
void t1_sge_intr_clear(struct sge *);
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index ea0f8741d7cf..310add28fcf5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -170,7 +170,7 @@ void t1_link_changed(adapter_t *adapter, int port_id)
t1_link_negotiated(adapter, port_id, link_ok, speed, duplex, fc);
}
-static int t1_pci_intr_handler(adapter_t *adapter)
+static bool t1_pci_intr_handler(adapter_t *adapter)
{
u32 pcix_cause;
@@ -179,9 +179,13 @@ static int t1_pci_intr_handler(adapter_t *adapter)
if (pcix_cause) {
pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
pcix_cause);
- t1_fatal_err(adapter); /* PCI errors are fatal */
+ /* PCI errors are fatal */
+ t1_interrupts_disable(adapter);
+ adapter->pending_thread_intr |= F_PL_INTR_SGE_ERR;
+ pr_alert("%s: PCI error encountered.\n", adapter->name);
+ return true;
}
- return 0;
+ return false;
}
#ifdef CONFIG_CHELSIO_T1_1G
@@ -210,13 +214,16 @@ static int fpga_phy_intr_handler(adapter_t *adapter)
/*
* Slow path interrupt handler for FPGAs.
*/
-static int fpga_slow_intr(adapter_t *adapter)
+static irqreturn_t fpga_slow_intr(adapter_t *adapter)
{
u32 cause = readl(adapter->regs + A_PL_CAUSE);
+ irqreturn_t ret = IRQ_NONE;
cause &= ~F_PL_INTR_SGE_DATA;
- if (cause & F_PL_INTR_SGE_ERR)
- t1_sge_intr_error_handler(adapter->sge);
+ if (cause & F_PL_INTR_SGE_ERR) {
+ if (t1_sge_intr_error_handler(adapter->sge))
+ ret = IRQ_WAKE_THREAD;
+ }
if (cause & FPGA_PCIX_INTERRUPT_GMAC)
fpga_phy_intr_handler(adapter);
@@ -231,14 +238,19 @@ static int fpga_slow_intr(adapter_t *adapter)
/* Clear TP interrupt */
writel(tp_cause, adapter->regs + FPGA_TP_ADDR_INTERRUPT_CAUSE);
}
- if (cause & FPGA_PCIX_INTERRUPT_PCIX)
- t1_pci_intr_handler(adapter);
+ if (cause & FPGA_PCIX_INTERRUPT_PCIX) {
+ if (t1_pci_intr_handler(adapter))
+ ret = IRQ_WAKE_THREAD;
+ }
/* Clear the interrupts just processed. */
if (cause)
writel(cause, adapter->regs + A_PL_CAUSE);
- return cause != 0;
+ if (ret != IRQ_NONE)
+ return ret;
+
+ return cause == 0 ? IRQ_NONE : IRQ_HANDLED;
}
#endif
@@ -842,31 +854,45 @@ void t1_interrupts_clear(adapter_t* adapter)
/*
* Slow path interrupt handler for ASICs.
*/
-static int asic_slow_intr(adapter_t *adapter)
+static irqreturn_t asic_slow_intr(adapter_t *adapter)
{
u32 cause = readl(adapter->regs + A_PL_CAUSE);
+ irqreturn_t ret = IRQ_HANDLED;
cause &= adapter->slow_intr_mask;
if (!cause)
- return 0;
- if (cause & F_PL_INTR_SGE_ERR)
- t1_sge_intr_error_handler(adapter->sge);
+ return IRQ_NONE;
+ if (cause & F_PL_INTR_SGE_ERR) {
+ if (t1_sge_intr_error_handler(adapter->sge))
+ ret = IRQ_WAKE_THREAD;
+ }
if (cause & F_PL_INTR_TP)
t1_tp_intr_handler(adapter->tp);
if (cause & F_PL_INTR_ESPI)
t1_espi_intr_handler(adapter->espi);
- if (cause & F_PL_INTR_PCIX)
- t1_pci_intr_handler(adapter);
- if (cause & F_PL_INTR_EXT)
- t1_elmer0_ext_intr(adapter);
+ if (cause & F_PL_INTR_PCIX) {
+ if (t1_pci_intr_handler(adapter))
+ ret = IRQ_WAKE_THREAD;
+ }
+ if (cause & F_PL_INTR_EXT) {
+ /* Wake the threaded interrupt to handle external interrupts as
+ * we require a process context. We disable EXT interrupts in
+ * the interim and let the thread reenable them when it's done.
+ */
+ adapter->pending_thread_intr |= F_PL_INTR_EXT;
+ adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+ writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+ adapter->regs + A_PL_ENABLE);
+ ret = IRQ_WAKE_THREAD;
+ }
/* Clear the interrupts just processed. */
writel(cause, adapter->regs + A_PL_CAUSE);
readl(adapter->regs + A_PL_CAUSE); /* flush writes */
- return 1;
+ return ret;
}
-int t1_slow_intr_handler(adapter_t *adapter)
+irqreturn_t t1_slow_intr_handler(adapter_t *adapter)
{
#ifdef CONFIG_CHELSIO_T1_1G
if (!t1_is_asic(adapter))
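
The cxgb changes above move the slow-path work from a private workqueue into a threaded interrupt: the hard handler defers work that needs process context by latching a bit and returning IRQ_WAKE_THREAD, and t1_interrupt_thread() later consumes pending_thread_intr. A stripped-down sketch of that split is below, assuming a made-up example_dev structure; only request_threaded_irq() and the IRQ_* return values are real kernel API.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_dev {
        spinlock_t lock;
        u32 pending;                    /* work deferred to the thread */
};

static irqreturn_t example_hardirq(int irq, void *data)
{
        struct example_dev *dev = data;

        spin_lock(&dev->lock);
        dev->pending |= 0x1;            /* cheap: just record what happened */
        spin_unlock(&dev->lock);
        return IRQ_WAKE_THREAD;         /* run the slow part in a thread */
}

static irqreturn_t example_thread(int irq, void *data)
{
        struct example_dev *dev = data;
        u32 pending;

        spin_lock_irq(&dev->lock);
        pending = dev->pending;
        dev->pending = 0;
        spin_unlock_irq(&dev->lock);

        return pending ? IRQ_HANDLED : IRQ_NONE;
}

static int example_request(struct example_dev *dev, int irq)
{
        /* Same registration shape as cxgb_up() above; flags illustrative. */
        return request_threaded_irq(irq, example_hardirq, example_thread,
                                    IRQF_SHARED, "example", dev);
}
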
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 876f90e5795e..d5218e74284c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -220,9 +220,6 @@ struct cudbg_mps_tcam {
u8 reserved[2];
};
-#define CUDBG_VPD_PF_SIZE 0x800
-#define CUDBG_SCFG_VER_ADDR 0x06
-#define CUDBG_SCFG_VER_LEN 4
#define CUDBG_VPD_VER_ADDR 0x18c7
#define CUDBG_VPD_VER_LEN 2
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 75474f810249..6c85a10f465c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -2686,10 +2686,10 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
char vpd_str[CUDBG_VPD_VER_LEN + 1];
- u32 scfg_vers, vpd_vers, fw_vers;
struct cudbg_vpd_data *vpd_data;
struct vpd_params vpd = { 0 };
- int rc, ret;
+ u32 vpd_vers, fw_vers;
+ int rc;
rc = t4_get_raw_vpd_params(padap, &vpd);
if (rc)
@@ -2699,24 +2699,6 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- /* Serial Configuration Version is located beyond the PF's vpd size.
- * Temporarily give access to entire EEPROM to get it.
- */
- rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
- if (rc < 0)
- return rc;
-
- ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
- &scfg_vers);
-
- /* Restore back to original PF's vpd size */
- rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
- if (rc < 0)
- return rc;
-
- if (ret)
- return ret;
-
rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
vpd_str);
if (rc)
@@ -2737,7 +2719,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
- vpd_data->scfg_vers = scfg_vers;
+ vpd_data->scfg_vers = t4_read_reg(padap, PCIE_STATIC_SPARE2_A);
vpd_data->vpd_vers = vpd_vers;
vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 8e681ce72d62..314f8d806723 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -414,7 +414,6 @@ struct pf_resources {
};
struct pci_params {
- unsigned int vpd_cap_addr;
unsigned char speed;
unsigned char width;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7fd264a6d085..6264bc66a4fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3201,8 +3201,6 @@ static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
int err;
u8 *na;
- adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
- PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
if (err)
return;
@@ -3882,8 +3880,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
@@ -5139,7 +5135,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip)
/* See if FW supports FW_FILTER2 work request */
if (is_t4(adap->params.chip)) {
- adap->params.filter2_wr_support = 0;
+ adap->params.filter2_wr_support = false;
} else {
params[0] = FW_PARAM_DEV(FILTER2_WR);
ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 1b49f2fa9b18..34546f5312ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -46,6 +46,9 @@
#define MAX_ULD_QSETS 16
#define MAX_ULD_NPORTS 4
+/* ulp_mem_io + ulptx_idata + payload + padding */
+#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 196652a114c5..256fae15e032 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1600,7 +1600,8 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* has opened up.
*/
eth_txq_stop(q);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
wr = (void *)&q->q.desc[q->q.pidx];
@@ -1832,6 +1833,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
struct adapter *adapter;
int qidx, credits, ret;
size_t fw_hdr_copy_len;
+ unsigned int chip_ver;
u64 cntrl, *end;
u32 wr_mid;
@@ -1896,6 +1898,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
goto out_free;
}
+ chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/* After we're done injecting the Work Request for this
@@ -1907,7 +1910,8 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
* has opened up.
*/
eth_txq_stop(txq);
- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ if (chip_ver > CHELSIO_T5)
+ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
}
/* Start filling in our Work Request. Note that we do _not_ handle
@@ -1960,7 +1964,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
*/
cpl = (void *)(lso + 1);
- if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ if (chip_ver <= CHELSIO_T5)
cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
else
cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
@@ -2842,17 +2846,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
* @skb: the packet
*
* Returns true if a packet can be sent as an offload WR with immediate
- * data. We currently use the same limit as for Ethernet packets.
+ * data.
+ * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
+ * However, FW_ULPTX_WR commands have a 256 byte immediate only
+ * payload limit.
*/
static inline int is_ofld_imm(const struct sk_buff *skb)
{
struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
- if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ if (unlikely(opcode == FW_ULPTX_WR))
+ return skb->len <= MAX_IMM_ULPTX_WR_LEN;
+ else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
return skb->len <= SGE_MAX_WR_LEN;
else
- return skb->len <= MAX_IMM_TX_PKT_LEN;
+ return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
}
/**
@@ -3598,6 +3607,25 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
}
txq = &s->ethtxq[pi->first_qset + rspq->idx];
+
+ /* We've got the Hardware Consumer Index Update in the Egress Update
+ * message. These Egress Update messages will be our sole CIDX Updates
+ * we get since we don't want to chew up PCIe bandwidth for both Ingress
+ * Messages and Status Page writes. However, The code which manages
+ * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
+ * stored in the Status Page at the end of the TX Queue. It's easiest
+ * to simply copy the CIDX Update value from the Egress Update message
+ * to the Status Page. Also note that no Endian issues need to be
+ * considered here since both are Big Endian and we're just copying
+ * bytes consistently ...
+ */
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ struct cpl_sge_egr_update *egr;
+
+ egr = (struct cpl_sge_egr_update *)rsp;
+ WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
+ }
+
t4_sge_eth_txq_egress_update(adapter, txq, -1);
}
@@ -4583,11 +4611,15 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
* write the CIDX Updates into the Status Page at the end of the
* TX Queue.
*/
- c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
+ c.autoequiqe_to_viid = htonl(((chip_ver <= CHELSIO_T5) ?
+ FW_EQ_ETH_CMD_AUTOEQUIQE_F :
+ FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
FW_EQ_ETH_CMD_VIID_V(pi->viid));
c.fetchszm_to_iqid =
- htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
+ htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V((chip_ver <= CHELSIO_T5) ?
+ HOSTFCMODE_INGRESS_QUEUE_X :
+ HOSTFCMODE_STATUS_PAGE_X) |
FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
@@ -4598,6 +4630,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
: FETCHBURSTMIN_64B_T6_X) |
FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
+ FW_EQ_ETH_CMD_CIDXFTHRESHO_V(chip_ver == CHELSIO_T5) |
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
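For T5 and earlier parts the CIDX update now arrives inside the Egress Update message rather than via a Status Page write, so the completion handler simply mirrors it into the Status Page and the existing reclaim logic stays untouched. A minimal sketch of that idea, with hypothetical structure names standing in for the cxgb4 ones:

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical layout: only the field the sketch touches is shown. */
struct eq_status_page {
	__be16 cidx;		/* hardware consumer index, big endian */
	__be16 pidx;
};

static void mirror_cidx_update(struct eq_status_page *stat, __be16 hw_cidx)
{
	/* Both sides are big endian, so a byte-for-byte copy is enough and
	 * the reclaim path can keep reading stat->cidx exactly as before.
	 */
	WRITE_ONCE(stat->cidx, hw_cidx);
}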
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 98d01a7497ec..98829e482bfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2689,7 +2689,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
-#define CHELSIO_VPD_UNIQUE_ID 0x82
/**
* t4_eeprom_ptov - translate a physical EEPROM address to virtual
@@ -2745,7 +2744,7 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
int i, ret = 0, addr;
int ec, sn, pn, na;
- u8 *vpd, csum;
+ u8 *vpd, csum, base_val = 0;
unsigned int vpdr_len, kw_offset, id_len;
vpd = vmalloc(VPD_LEN);
@@ -2755,17 +2754,11 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
- ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
+ ret = pci_read_vpd(adapter->pdev, VPD_BASE, 1, &base_val);
if (ret < 0)
goto out;
- /* The VPD shall have a unique identifier specified by the PCI SIG.
- * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
- * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
- * is expected to automatically put this entry at the
- * beginning of the VPD.
- */
- addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
+ addr = base_val == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
if (ret < 0)
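The reworked VPD probe no longer needs the private CHELSIO_VPD_UNIQUE_ID define: it reads a single byte and checks for the standard Large Resource ID String tag. A standalone sketch of the same pattern (the buffer handling is illustrative; the constants come from <linux/pci.h>):

#include <linux/pci.h>

#define VPD_BASE	0x400
#define VPD_BASE_OLD	0
#define VPD_LEN		1024

static int read_adapter_vpd(struct pci_dev *pdev, u8 *vpd /* VPD_LEN bytes */)
{
	u8 first = 0;
	ssize_t ret;
	int addr;

	/* A well-formed VPD image starts with the ID String large resource
	 * tag (0x82); early cards instead place the image at offset 0.
	 */
	ret = pci_read_vpd(pdev, VPD_BASE, 1, &first);
	if (ret < 0)
		return ret;

	addr = first == PCI_VPD_LRDT_ID_STRING ? VPD_BASE : VPD_BASE_OLD;
	ret = pci_read_vpd(pdev, addr, VPD_LEN, vpd);
	return ret < 0 ? ret : 0;
}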
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 0c5373462ced..0b1b5f9c67d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -219,6 +219,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index b11a172b5174..695916ba0405 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -884,6 +884,12 @@
#define TDUE_V(x) ((x) << TDUE_S)
#define TDUE_F TDUE_V(1U)
+/* The SPARE2 register holds the 32-bit value at offset 0x6 of the Serial
+ * INIT Configuration flashed on the EEPROM, i.e. the 32-bit Serial
+ * Configuration Version information.
+ */
+#define PCIE_STATIC_SPARE2_A 0x5bfc
+
/* registers for module MC */
#define MC_INT_CAUSE_A 0x7518
#define MC_P_INT_CAUSE_A 0x41318
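As a usage note, the new register can be read back through the driver's normal register accessor; how the value is then reported is purely illustrative here:

	u32 scfg_ver = t4_read_reg(adapter, PCIE_STATIC_SPARE2_A);

	dev_info(adapter->pdev_dev,
		 "Serial Configuration version: 0x%08x\n", scfg_ver);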
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 47d9268a7e3c..585590520076 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -92,9 +92,6 @@ static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
static struct cxgb4_uld_info ch_ipsec_uld_info = {
.name = CHIPSEC_DRV_MODULE_NAME,
- .nrxq = MAX_ULD_QSETS,
- /* Max ntxq will be derived from fw config file*/
- .rxq_size = 1024,
.add = ch_ipsec_uld_add,
.state_change = ch_ipsec_uld_state_change,
.tx_handler = ch_ipsec_xmit,
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index 47ba81e42f5d..b1161bdeda4d 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -50,9 +50,6 @@
#define MIN_RCV_WND (24 * 1024U)
#define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
-/* ulp_mem_io + ulptx_idata + payload + padding */
-#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
-
/* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
#define TX_HEADER_LEN \
(sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index fb269d587b74..f04ec53544ae 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2509,8 +2509,6 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
@@ -2535,8 +2533,6 @@ static const struct net_device_ops enic_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = enic_rx_flow_steer,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = enic_features_check,
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index d402d83d9edd..b6eba29d8e99 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5179,8 +5179,6 @@ static const struct net_device_ops be_netdev_ops = {
#endif
.ndo_bridge_setlink = be_ndo_bridge_setlink,
.ndo_bridge_getlink = be_ndo_bridge_getlink,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = be_features_check,
.ndo_get_phys_port_id = be_get_phys_port_id,
};
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 4360ce4d3fb6..ccfe52a50a66 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2180,8 +2180,10 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
struct xdp_frame **init_xdpf)
{
struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
- void *new_buff;
+ void *new_buff, *aligned_data;
struct page *p;
+ u32 data_shift;
+ int headroom;
/* Check the data alignment and make sure the headroom is large
* enough to store the xdpf backpointer. Use an aligned headroom
@@ -2191,25 +2193,57 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv,
* byte frame headroom. If the XDP program uses all of it, copy the
* data to a new buffer and make room for storing the backpointer.
*/
- if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
+ if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
xdpf->headroom >= priv->tx_headroom) {
xdpf->headroom = priv->tx_headroom;
return 0;
}
+ /* Try to move the data inside the buffer just enough to align it and
+ * store the xdpf backpointer. If the available headroom isn't large
+ * enough, resort to allocating a new buffer and copying the data.
+ */
+ aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
+ data_shift = xdpf->data - aligned_data;
+
+ /* The XDP frame's headroom needs to be large enough to accommodate
+ * shifting the data as well as storing the xdpf backpointer.
+ */
+ if (xdpf->headroom >= data_shift + priv->tx_headroom) {
+ memmove(aligned_data, xdpf->data, xdpf->len);
+ xdpf->data = aligned_data;
+ xdpf->headroom = priv->tx_headroom;
+ return 0;
+ }
+
+ /* The new xdp_frame is stored in the new buffer. Reserve enough space
+ * in the headroom for storing it along with the driver's private
+ * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
+ * guarantee the data's alignment in the buffer.
+ */
+ headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
+ DPAA_FD_DATA_ALIGNMENT);
+
+ /* Ensure the extended headroom and data don't overflow the buffer,
+ * while maintaining the mandatory tailroom.
+ */
+ if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+ return -ENOMEM;
+
p = dev_alloc_pages(0);
if (unlikely(!p))
return -ENOMEM;
/* Copy the data to the new buffer at a properly aligned offset */
new_buff = page_address(p);
- memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
+ memcpy(new_buff + headroom, xdpf->data, xdpf->len);
/* Create an XDP frame around the new buffer in a similar fashion
* to xdp_convert_buff_to_frame.
*/
new_xdpf = new_buff;
- new_xdpf->data = new_buff + priv->tx_headroom;
+ new_xdpf->data = new_buff + headroom;
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
@@ -2532,12 +2566,10 @@ static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
return XDP_PASS;
}
- xdp.data = vaddr + fd_off;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp.data_end = xdp.data + qm_fd_get_length(fd);
- xdp.frame_sz = DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE;
- xdp.rxq = &dpaa_fq->xdp_rxq;
+ xdp_init_buff(&xdp, DPAA_BP_RAW_SIZE - DPAA_TX_PRIV_DATA_SIZE,
+ &dpaa_fq->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + fd_off - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM, qm_fd_get_length(fd), true);
/* We reserve a fixed headroom of 256 bytes under the erratum and we
* offer it all to XDP programs to use. If no room is left for the
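The reworked A-050385 path now tries three things in order: leave the frame alone if it is already aligned, shift it left inside its own headroom, and only then copy it into a freshly allocated page. A condensed sketch of the first two steps, with the alignment and headroom values passed in instead of taken from the driver:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <net/xdp.h>

/* Returns 0 if the frame could be fixed up in place, -EAGAIN if the caller
 * must fall back to copying into a new, properly laid out buffer.
 */
static int xdpf_try_align_in_place(struct xdp_frame *xdpf, unsigned long align,
				   u32 tx_headroom)
{
	void *aligned = PTR_ALIGN_DOWN(xdpf->data, align);
	u32 shift = xdpf->data - aligned;

	if (PTR_IS_ALIGNED(xdpf->data, align) && xdpf->headroom >= tx_headroom) {
		xdpf->headroom = tx_headroom;	/* nothing to move */
		return 0;
	}

	if (xdpf->headroom >= shift + tx_headroom) {
		memmove(aligned, xdpf->data, xdpf->len);	/* shift left */
		xdpf->data = aligned;
		xdpf->headroom = tx_headroom;
		return 0;
	}

	return -EAGAIN;	/* not enough headroom: copy to a new buffer */
}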
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index fb0bcd18ec0c..492943bb9c48 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -350,7 +350,7 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
struct bpf_prog *xdp_prog;
struct xdp_buff xdp;
u32 xdp_act = XDP_PASS;
- int err;
+ int err, offset;
rcu_read_lock();
@@ -358,14 +358,10 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
if (!xdp_prog)
goto out;
- xdp.data = vaddr + dpaa2_fd_get_offset(fd);
- xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
- xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.rxq = &ch->xdp_rxq;
-
- xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
- (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);
+ offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
+ xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
+ xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
+ dpaa2_fd_get_len(fd), false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -399,10 +395,20 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
- if (unlikely(err))
+ if (unlikely(err)) {
+ addr = dma_map_page(priv->net_dev->dev.parent,
+ virt_to_page(vaddr), 0,
+ priv->rx_buf_size, DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ ch->buf_count++;
+ dpaa2_eth_xdp_release_buf(priv, ch, addr);
+ }
ch->stats.xdp_drop++;
- else
+ } else {
ch->stats.xdp_redirect++;
+ }
break;
}
@@ -768,12 +774,11 @@ static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
/* Prepare the HW SGT structure */
sgt_buf_size = priv->tx_data_offset +
sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
- sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
+ sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
if (unlikely(!sgt_buf)) {
err = -ENOMEM;
goto sgt_buf_alloc_failed;
}
- sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
memset(sgt_buf, 0, sgt_buf_size);
sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
@@ -1262,6 +1267,22 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
+static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
+ bool enable)
+{
+ int err;
+
+ err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
+
+ if (err) {
+ netdev_err(priv->net_dev,
+ "dpni_enable_vlan_filter failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1648,7 +1669,7 @@ set_cgtd:
* CG taildrop threshold, so it won't interfere with it; we also
* want frames in non-PFC enabled traffic classes to be kept in check)
*/
- td.enable = !tx_pause || (tx_pause && pfc);
+ td.enable = !tx_pause || pfc;
if (priv->rx_cgtd_enabled == td.enable)
return;
@@ -1691,7 +1712,7 @@ static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
*/
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
goto out;
/* Check link state; speed / duplex changes are not treated yet */
@@ -1730,7 +1751,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
priv->dpbp_dev->obj_desc.id, priv->bpid);
}
- if (!priv->mac) {
+ if (!dpaa2_eth_is_type_phy(priv)) {
/* We'll only start the txqs when the link is actually ready;
* make sure we don't race against the link up notification,
* which may come immediately after dpni_enable();
@@ -1752,7 +1773,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
phylink_start(priv->mac->phylink);
return 0;
@@ -1826,11 +1847,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
int dpni_enabled = 0;
int retries = 10;
- if (!priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
+ phylink_stop(priv->mac->phylink);
+ } else {
netif_tx_stop_all_queues(net_dev);
netif_carrier_off(net_dev);
- } else {
- phylink_stop(priv->mac->phylink);
}
/* On dpni_disable(), the MC firmware will:
@@ -1952,6 +1973,43 @@ static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
}
}
+static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
+ vid, 0, 0, 0);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not add the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
+static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
+ __be16 vlan_proto, u16 vid)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ int err;
+
+ err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
+
+ if (err) {
+ netdev_warn(priv->net_dev,
+ "Could not remove the vlan id %u\n",
+ vid);
+ return err;
+ }
+
+ return 0;
+}
+
static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -2058,6 +2116,13 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
bool enable;
int err;
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
+ if (err)
+ return err;
+ }
+
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
err = dpaa2_eth_set_rx_csum(priv, enable);
@@ -2115,7 +2180,7 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCSHWTSTAMP)
return dpaa2_eth_ts_ioctl(dev, rq, cmd);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
return -EOPNOTSUPP;
@@ -2507,6 +2572,8 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_bpf = dpaa2_eth_xdp,
.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
.ndo_setup_tc = dpaa2_eth_setup_tc,
+ .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
};
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
@@ -4015,6 +4082,9 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC;
net_dev->hw_features = net_dev->features;
+ if (priv->dpni_attrs.vlan_filter_entries)
+ net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
return 0;
}
@@ -4042,10 +4112,11 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
- if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
- return 0;
- if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
+ return PTR_ERR(dpmac_dev);
+
+ if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
@@ -4056,23 +4127,38 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
mac->mc_io = priv->mc_io;
mac->net_dev = priv->net_dev;
- err = dpaa2_mac_connect(mac);
- if (err) {
- netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
- kfree(mac);
- return err;
- }
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
priv->mac = mac;
+ if (dpaa2_eth_is_type_phy(priv)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
+ goto err_close_mac;
+ }
+ }
+
return 0;
+
+err_close_mac:
+ dpaa2_mac_close(mac);
+ priv->mac = NULL;
+err_free_mac:
+ kfree(mac);
+ return err;
}
static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
- if (!priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
+ dpaa2_mac_disconnect(priv->mac);
+
+ if (!dpaa2_eth_has_mac(priv))
return;
- dpaa2_mac_disconnect(priv->mac);
+ dpaa2_mac_close(priv->mac);
kfree(priv->mac);
priv->mac = NULL;
}
@@ -4101,7 +4187,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_eth_disconnect_mac(priv);
else
dpaa2_eth_connect_mac(priv);
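dpaa and dpaa2 (like the other drivers converted in this series) now fill in the xdp_buff through the common helpers instead of open-coding every field. The general shape, with driver-specific names replaced by placeholders, is:

#include <net/xdp.h>

static void build_rx_xdp_buff(struct xdp_buff *xdp, unsigned char *hard_start,
			      u32 frame_sz, u32 headroom, u32 data_len,
			      struct xdp_rxq_info *rxq)
{
	/* Per-queue invariants: total buffer size and the rxq backpointer */
	xdp_init_buff(xdp, frame_sz, rxq);

	/* Per-frame layout: headroom, payload length, and whether the
	 * metadata area in front of the payload is valid.
	 */
	xdp_prepare_buff(xdp, hard_start, headroom, data_len,
			 false /* no metadata */);
}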
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index d236b8695c39..9b6a89709ce1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -693,6 +693,21 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
}
+static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+{
+ if (priv->mac &&
+ (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+ priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+ return true;
+
+ return false;
+}
+
+static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+{
+ return priv->mac ? true : false;
+}
+
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 key);
int dpaa2_eth_cls_key_size(u64 key);
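The two helpers answer different questions, which is why the call sites in the previous diff split between them; roughly, mirroring those callers:

	/* "Do we drive this DPMAC through phylink?" - true only for the PHY
	 * and backplane link types.
	 */
	if (dpaa2_eth_is_type_phy(priv))
		phylink_start(priv->mac->phylink);

	/* "Is any DPMAC connected at all?" - also true for fixed links,
	 * used when exposing the MAC counters through ethtool.
	 */
	if (dpaa2_eth_has_mac(priv))
		dpaa2_mac_get_ethtool_stats(priv->mac, data);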
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index f981a523e13a..bf59708b869e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -85,7 +85,7 @@ static int dpaa2_eth_nway_reset(struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_nway_reset(priv->mac->phylink);
return -EOPNOTSUPP;
@@ -97,7 +97,7 @@ dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_ksettings_get(priv->mac->phylink,
link_settings);
@@ -115,7 +115,7 @@ dpaa2_eth_set_link_ksettings(struct net_device *net_dev,
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
- if (!priv->mac)
+ if (!dpaa2_eth_is_type_phy(priv))
return -ENOTSUPP;
return phylink_ethtool_ksettings_set(priv->mac->phylink, link_settings);
@@ -127,7 +127,7 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
u64 link_options = priv->link_state.options;
- if (priv->mac) {
+ if (dpaa2_eth_is_type_phy(priv)) {
phylink_ethtool_get_pauseparam(priv->mac->phylink, pause);
return;
}
@@ -150,7 +150,7 @@ static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
return -EOPNOTSUPP;
}
- if (priv->mac)
+ if (dpaa2_eth_is_type_phy(priv))
return phylink_ethtool_set_pauseparam(priv->mac->phylink,
pause);
if (pause->autoneg)
@@ -198,7 +198,7 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_strings(p);
break;
}
@@ -211,7 +211,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
switch (sset) {
case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
num_ss_stats += dpaa2_mac_get_sset_count();
return num_ss_stats;
default:
@@ -313,7 +313,7 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
}
*(data + i++) = buf_cnt;
- if (priv->mac)
+ if (dpaa2_eth_has_mac(priv))
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 828c177df03d..ccaf7e35abeb 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -79,10 +79,20 @@ static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
phy_interface_t interface)
{
switch (interface) {
+ /* We can switch between SGMII and 1000BASE-X at runtime with
+ * pcs-lynx
+ */
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (mac->pcs &&
+ (mac->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ mac->if_mode == PHY_INTERFACE_MODE_1000BASEX))
+ return false;
+ return interface != mac->if_mode;
+
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_QSGMII:
- case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -122,13 +132,17 @@ static void dpaa2_mac_validate(struct phylink_config *config,
fallthrough;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
phylink_set(mask, 1000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
+ break;
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 10baseT_Full);
break;
default:
goto empty_set;
@@ -174,30 +188,22 @@ static void dpaa2_mac_link_up(struct phylink_config *config,
dpmac_state->up = 1;
- if (mac->if_link_type == DPMAC_LINK_TYPE_PHY) {
- /* If the DPMAC is configured for PHY mode, we need
- * to pass the link parameters to the MC firmware.
- */
- dpmac_state->rate = speed;
-
- if (duplex == DUPLEX_HALF)
- dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
- else if (duplex == DUPLEX_FULL)
- dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
-
- /* This is lossy; the firmware really should take the pause
- * enablement status rather than pause/asym pause status.
- */
- if (rx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
-
- if (rx_pause ^ tx_pause)
- dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
- else
- dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
- }
+ dpmac_state->rate = speed;
+
+ if (duplex == DUPLEX_HALF)
+ dpmac_state->options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ else if (duplex == DUPLEX_FULL)
+ dpmac_state->options &= ~DPMAC_LINK_OPT_HALF_DUPLEX;
+
+ if (rx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_PAUSE;
+
+ if (rx_pause ^ tx_pause)
+ dpmac_state->options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+ else
+ dpmac_state->options &= ~DPMAC_LINK_OPT_ASYM_PAUSE;
err = dpmac_set_link_state(mac->mc_io, 0,
mac->mc_dev->mc_handle, dpmac_state);
@@ -228,32 +234,6 @@ static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
.mac_link_down = dpaa2_mac_link_down,
};
-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
- struct fsl_mc_io *mc_io)
-{
- struct dpmac_attr attr;
- bool fixed = false;
- u16 mc_handle = 0;
- int err;
-
- err = dpmac_open(mc_io, 0, dpmac_dev->obj_desc.id,
- &mc_handle);
- if (err || !mc_handle)
- return false;
-
- err = dpmac_get_attributes(mc_io, 0, mc_handle, &attr);
- if (err)
- goto out;
-
- if (attr.link_type == DPMAC_LINK_TYPE_FIXED)
- fixed = true;
-
-out:
- dpmac_close(mc_io, 0, mc_handle);
-
- return fixed;
-}
-
static int dpaa2_pcs_create(struct dpaa2_mac *mac,
struct device_node *dpmac_node, int id)
{
@@ -302,36 +282,20 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
- struct fsl_mc_device *dpmac_dev = mac->mc_dev;
struct net_device *net_dev = mac->net_dev;
struct device_node *dpmac_node;
struct phylink *phylink;
- struct dpmac_attr attr;
int err;
- err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
- &dpmac_dev->mc_handle);
- if (err || !dpmac_dev->mc_handle) {
- netdev_err(net_dev, "dpmac_open() = %d\n", err);
- return -ENODEV;
- }
-
- err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle, &attr);
- if (err) {
- netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
- goto err_close_dpmac;
- }
-
- mac->if_link_type = attr.link_type;
+ mac->if_link_type = mac->attr.link_type;
- dpmac_node = dpaa2_mac_get_node(attr.id);
+ dpmac_node = dpaa2_mac_get_node(mac->attr.id);
if (!dpmac_node) {
- netdev_err(net_dev, "No dpmac@%d node found.\n", attr.id);
- err = -ENODEV;
- goto err_close_dpmac;
+ netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
+ return -ENODEV;
}
- err = dpaa2_mac_get_if_mode(dpmac_node, attr);
+ err = dpaa2_mac_get_if_mode(dpmac_node, mac->attr);
if (err < 0) {
err = -EINVAL;
goto err_put_node;
@@ -351,9 +315,10 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
- if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
- attr.eth_if != DPMAC_ETH_IF_RGMII) {
- err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if ((mac->attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ mac->attr.eth_if != DPMAC_ETH_IF_RGMII) ||
+ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE) {
+ err = dpaa2_pcs_create(mac, dpmac_node, mac->attr.id);
if (err)
goto err_put_node;
}
@@ -389,8 +354,7 @@ err_pcs_destroy:
dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
-err_close_dpmac:
- dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+
return err;
}
@@ -402,8 +366,40 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
dpaa2_pcs_destroy(mac);
+}
+
+int dpaa2_mac_open(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ struct net_device *net_dev = mac->net_dev;
+ int err;
+
+ err = dpmac_open(mac->mc_io, 0, dpmac_dev->obj_desc.id,
+ &dpmac_dev->mc_handle);
+ if (err || !dpmac_dev->mc_handle) {
+ netdev_err(net_dev, "dpmac_open() = %d\n", err);
+ return -ENODEV;
+ }
+
+ err = dpmac_get_attributes(mac->mc_io, 0, dpmac_dev->mc_handle,
+ &mac->attr);
+ if (err) {
+ netdev_err(net_dev, "dpmac_get_attributes() = %d\n", err);
+ goto err_close_dpmac;
+ }
+
+ return 0;
- dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
+err_close_dpmac:
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
+ return err;
+}
+
+void dpaa2_mac_close(struct dpaa2_mac *mac)
+{
+ struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+
+ dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
}
static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
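With dpaa2_mac_connect() split into open/connect and disconnect/close, the lifecycle seen from dpaa2-eth becomes roughly the following sketch (error handling trimmed; it mirrors dpaa2_eth_connect_mac() and dpaa2_eth_disconnect_mac() in the earlier hunk):

static int example_mac_lifecycle(struct dpaa2_eth_priv *priv,
				 struct dpaa2_mac *mac)
{
	int err;

	err = dpaa2_mac_open(mac);	/* dpmac_open() + cache attributes */
	if (err)
		return err;

	if (dpaa2_eth_is_type_phy(priv)) {	/* PHY/backplane: use phylink */
		err = dpaa2_mac_connect(mac);
		if (err) {
			dpaa2_mac_close(mac);
			return err;
		}
	}

	/* ... interface runs ... */

	if (dpaa2_eth_is_type_phy(priv))
		dpaa2_mac_disconnect(mac);	/* tear down phylink and PCS */
	dpaa2_mac_close(mac);			/* dpmac_close() */
	return 0;
}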
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 955a52856210..13d42dd58ec9 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -17,6 +17,7 @@ struct dpaa2_mac {
struct dpmac_link_state state;
struct net_device *net_dev;
struct fsl_mc_io *mc_io;
+ struct dpmac_attr attr;
struct phylink_config phylink_config;
struct phylink *phylink;
@@ -28,6 +29,10 @@ struct dpaa2_mac {
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
struct fsl_mc_io *mc_io);
+int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+void dpaa2_mac_close(struct dpaa2_mac *mac);
+
int dpaa2_mac_connect(struct dpaa2_mac *mac);
void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 90453dc7baef..9f80bdfeedec 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -62,6 +62,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
+#define DPNI_CMDID_ENABLE_VLAN_FILTER DPNI_CMD(0x230)
+#define DPNI_CMDID_ADD_VLAN_ID DPNI_CMD_V2(0x231)
+#define DPNI_CMDID_REMOVE_VLAN_ID DPNI_CMD(0x232)
+
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
@@ -662,4 +666,17 @@ struct dpni_rsp_single_step_cfg {
__le32 peer_delay;
};
+struct dpni_cmd_enable_vlan_filter {
+ /* only the LSB */
+ u8 en;
+};
+
+struct dpni_cmd_vlan_id {
+ u8 flags;
+ u8 tc_id;
+ u8 flow_id;
+ u8 pad;
+ __le16 vlan_id;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 6ea7db66a632..aa429c17c343 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1225,6 +1225,99 @@ int dpni_get_port_mac_addr(struct fsl_mc_io *mc_io,
}
/**
+ * dpni_enable_vlan_filter() - Enable/disable VLAN filtering mode
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @en: Set to '1' to enable; '0' to disable
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u32 en)
+{
+ struct dpni_cmd_enable_vlan_filter *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE_VLAN_FILTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_enable_vlan_filter *)cmd.params;
+ dpni_set_field(cmd_params->en, ENABLE, en);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_add_vlan_id() - Add VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to add
+ * @flags: 0 - tc_id and flow_id will be ignored.
+ * Packets with this vlan_id will be passed to the next
+ * classification stages.
+ * DPNI_VLAN_SET_QUEUE_ACTION - packets with this vlan_id
+ * will be forwarded directly to the queue defined by the
+ * tc_id and flow_id
+ *
+ * @tc_id: Traffic class selection (0-7)
+ * @flow_id: Selects the specific queue out of the set allocated for the
+ * same tc_id. Value must be in the range 0 to NUM_QUEUES - 1
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->flags = flags;
+ cmd_params->tc_id = tc_id;
+ cmd_params->flow_id = flow_id;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpni_remove_vlan_id() - Remove VLAN ID filter
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @vlan_id: VLAN ID to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id)
+{
+ struct dpni_cmd_vlan_id *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpni_cmd_vlan_id *)cmd.params;
+ cmd_params->vlan_id = cpu_to_le16(vlan_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
* dpni_add_mac_addr() - Add MAC address filter
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
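Putting the three new MC commands together, turning on VLAN filtering and whitelisting a single VID looks roughly like this (it mirrors the ndo handlers added to dpaa2-eth.c; flags/tc_id/flow_id of 0 mean no queue steering, and VID 100 is just an example):

	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, 1);
	if (!err)
		err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
				       100, 0, 0, 0);

	/* ... later, drop the filter again ... */
	err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, 100);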
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index e7b9e195b534..4e96d9362dd2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1114,4 +1114,13 @@ int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
u16 token,
struct dpni_single_step_cfg *ptp_cfg);
+int dpni_enable_vlan_filter(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u32 en);
+
+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id, u8 flags, u8 tc_id, u8 flow_id);
+
+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u16 vlan_id);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index d99ea0f4e4a6..ab92382c399a 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -27,7 +27,7 @@ config FSL_ENETC_VF
config FSL_ENETC_MDIO
tristate "ENETC MDIO driver"
- depends on PCI
+ depends on PCI && MDIO_DEVRES && MDIO_BUS
help
This driver supports NXP ENETC Central MDIO controller as a PCIe
physical function (PF) device.
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index e1e950d48c92..c71fe8d751d5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)
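As the macro shows, the low nibble of PRSSCAPR encodes the RSS table size as a power of two in units of 32 entries; for example:

	/* PRSSCAPR low nibble == 3  ->  BIT(3) * 32 == 256 RSS table entries */
	num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(0x3);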
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index ee0116ed4738..70e6d97b380f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -14,23 +14,6 @@
#define ENETC_MDIO_DATA 0x8 /* MDIO data */
#define ENETC_MDIO_ADDR 0xc /* MDIO address */
-static inline u32 _enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
-{
- return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
-}
-
-static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
- u32 val)
-{
- enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
-}
-
-#define enetc_mdio_rd(mdio_priv, off) \
- _enetc_mdio_rd(mdio_priv, ENETC_##off)
-#define enetc_mdio_wr(mdio_priv, off, val) \
- _enetc_mdio_wr(mdio_priv, ENETC_##off, val)
-#define enetc_mdio_rd_reg(off) enetc_mdio_rd(mdio_priv, off)
-
#define MDIO_CFG_CLKDIV(x) ((((x) >> 1) & 0xff) << 8)
#define MDIO_CFG_BSY BIT(0)
#define MDIO_CFG_RD_ER BIT(1)
@@ -47,15 +30,29 @@ static inline void _enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
#define MDIO_CTL_DEV_ADDR(x) ((x) & 0x1f)
#define MDIO_CTL_PORT_ADDR(x) (((x) & 0x1f) << 5)
#define MDIO_CTL_READ BIT(15)
-#define MDIO_DATA(x) ((x) & 0xffff)
-#define TIMEOUT 1000
+static inline u32 enetc_mdio_rd(struct enetc_mdio_priv *mdio_priv, int off)
+{
+ return enetc_port_rd_mdio(mdio_priv->hw, mdio_priv->mdio_base + off);
+}
+
+static inline void enetc_mdio_wr(struct enetc_mdio_priv *mdio_priv, int off,
+ u32 val)
+{
+ enetc_port_wr_mdio(mdio_priv->hw, mdio_priv->mdio_base + off, val);
+}
+
+static bool enetc_mdio_is_busy(struct enetc_mdio_priv *mdio_priv)
+{
+ return enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_BSY;
+}
+
static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
{
- u32 val;
+ bool is_busy;
- return readx_poll_timeout(enetc_mdio_rd_reg, MDIO_CFG, val,
- !(val & MDIO_CFG_BSY), 10, 10 * TIMEOUT);
+ return readx_poll_timeout(enetc_mdio_is_busy, mdio_priv,
+ is_busy, !is_busy, 10, 10 * 1000);
}
int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
@@ -75,7 +72,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -83,11 +80,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
/* set port and dev addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -95,7 +92,7 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
}
/* write the value */
- enetc_mdio_wr(mdio_priv, MDIO_DATA, MDIO_DATA(value));
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -121,7 +118,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
mdio_cfg &= ~MDIO_CFG_ENC45;
}
- enetc_mdio_wr(mdio_priv, MDIO_CFG, mdio_cfg);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -129,11 +126,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* set port and device addr */
mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
@@ -141,21 +138,21 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
/* initiate the read */
- enetc_mdio_wr(mdio_priv, MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
ret = enetc_mdio_wait_complete(mdio_priv);
if (ret)
return ret;
/* return all Fs if nothing was there */
- if (enetc_mdio_rd(mdio_priv, MDIO_CFG) & MDIO_CFG_RD_ER) {
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
return 0xffff;
}
- value = enetc_mdio_rd(mdio_priv, MDIO_DATA) & 0xffff;
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
return value;
}
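readx_poll_timeout() from <linux/iopoll.h> repeatedly evaluates op(addr) until the condition holds or the timeout expires, which is why the busy check above is now wrapped in a one-argument helper instead of the old register macro. The general pattern, with device-specific names as placeholders:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

struct mydev { void __iomem *regs; };	/* placeholder device */

#define MYDEV_STATUS		0x0	/* placeholder register offset */
#define MYDEV_STATUS_BSY	BIT(0)

static bool mydev_is_busy(struct mydev *dev)
{
	return readl(dev->regs + MYDEV_STATUS) & MYDEV_STATUS_BSY;
}

static int mydev_wait_idle(struct mydev *dev)
{
	bool busy;

	/* poll every 10 us, give up after 10 ms (returns -ETIMEDOUT then) */
	return readx_poll_timeout(mydev_is_busy, dev, busy, !busy, 10, 10 * 1000);
}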
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index ed8fcb8b486e..515c5b29d7aa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -996,6 +996,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
phylink_destroy(priv->phylink);
}
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+ struct enetc_cmd_rfse rfse = {0};
+ struct enetc_hw *hw = &si->hw;
+ int num_rfs, i, err = 0;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+
+ for (i = 0; i < num_rfs; i++) {
+ err = enetc_set_fs_entry(si, &rfse, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ int num_rss, err;
+ int *rss_table;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+ num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+ if (!num_rss)
+ return 0;
+
+ rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ err = enetc_set_rss_table(si, rss_table, num_rss);
+
+ kfree(rss_table);
+
+ return err;
+}
+
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1051,6 +1096,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1079,6 +1136,8 @@ err_phylink_create:
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
enetc_free_msix(priv);
+err_init_port_rss:
+err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
@@ -1098,14 +1157,15 @@ static void enetc_pf_remove(struct pci_dev *pdev)
struct enetc_ndev_priv *priv;
priv = netdev_priv(si->ndev);
- enetc_phylink_destroy(priv);
- enetc_mdiobus_destroy(pf);
if (pf->num_vfs)
enetc_sriov_configure(pdev, 0);
unregister_netdev(si->ndev);
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+
enetc_free_msix(priv);
enetc_free_si_resources(priv);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 9ebdb0e54291..3db882322b2b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -945,7 +945,6 @@ static void
fec_restart(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u32 val;
u32 temp_mac[2];
u32 rcntl = OPT_FRAME_SIZE | 0x04;
u32 ecntl = 0x2; /* ETHEREN */
@@ -997,7 +996,8 @@ fec_restart(struct net_device *ndev)
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- val = readl(fep->hwp + FEC_RACC);
+ u32 val = readl(fep->hwp + FEC_RACC);
+
/* align IP header */
val |= FEC_RACC_SHIFT16;
if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -1664,7 +1664,6 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
unsigned char *iap, tmpaddr[ETH_ALEN];
/*
@@ -1695,6 +1694,8 @@ static void fec_get_mac(struct net_device *ndev)
if (FEC_FLASHMAC)
iap = (unsigned char *)FEC_FLASHMAC;
#else
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
+
if (pdata)
iap = (unsigned char *)&pdata->mac;
#endif
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bb9887f98841..62f42921933d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -111,6 +111,7 @@ do { \
#define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */
#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */
+#define IF_MODE_MII 0x00000001 /* 30-31 MII interface */
#define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */
#define IF_MODE_RGMII 0x00000004
#define IF_MODE_RGMII_AUTO 0x00008000
@@ -442,6 +443,9 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg,
case PHY_INTERFACE_MODE_XGMII:
tmp |= IF_MODE_10G;
break;
+ case PHY_INTERFACE_MODE_MII:
+ tmp |= IF_MODE_MII;
+ break;
default:
tmp |= IF_MODE_GMII;
if (phy_if == PHY_INTERFACE_MODE_RGMII ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d391a45cebb6..541de32ea662 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -58,7 +58,6 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 6d853f018d53..ef4e2febeb5b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -70,9 +70,32 @@ static struct {
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");
-static struct ucc_geth_info ugeth_primary_info = {
+static int ucc_geth_thread_count(enum ucc_geth_num_of_threads idx)
+{
+ static const u8 count[] = {
+ [UCC_GETH_NUM_OF_THREADS_1] = 1,
+ [UCC_GETH_NUM_OF_THREADS_2] = 2,
+ [UCC_GETH_NUM_OF_THREADS_4] = 4,
+ [UCC_GETH_NUM_OF_THREADS_6] = 6,
+ [UCC_GETH_NUM_OF_THREADS_8] = 8,
+ };
+ if (idx >= ARRAY_SIZE(count))
+ return 0;
+ return count[idx];
+}
+
+static inline int ucc_geth_tx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static inline int ucc_geth_rx_queues(const struct ucc_geth_info *info)
+{
+ return 1;
+}
+
+static const struct ucc_geth_info ugeth_primary_info = {
.uf_info = {
- .bd_mem_part = MEM_PART_SYSTEM,
.rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
.max_rx_buf_length = 1536,
/* adjusted at startup if max-speed 1000 */
@@ -90,8 +113,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.tcrc = UCC_FAST_16_BIT_CRC,
.synl = UCC_FAST_SYNC_LEN_NOT_USED,
},
- .numQueuesTx = 1,
- .numQueuesRx = 1,
.extendedFilteringChainPointer = ((uint32_t) NULL),
.typeorlen = 3072 /*1536 */ ,
.nonBackToBackIfgPart1 = 0x40,
@@ -157,8 +178,6 @@ static struct ucc_geth_info ugeth_primary_info = {
.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};
-static struct ucc_geth_info ugeth_info[8];
-
#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
@@ -558,7 +577,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
int i;
int length;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
if (ugeth->p_tx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenTx[i] *
@@ -567,7 +586,7 @@ static void dump_bds(struct ucc_geth_private *ugeth)
mem_disp(ugeth->p_tx_bd_ring[i], length);
}
}
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
length =
(ugeth->ug_info->bdRingLenRx[i] *
@@ -671,32 +690,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
in_be32(&ugeth->ug_regs->scam));
if (ugeth->p_thread_data_tx) {
- int numThreadsTxNumerical;
- switch (ugeth->ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
- numThreadsTxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsTx);
pr_info("Thread data TXs:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_tx);
- for (i = 0; i < numThreadsTxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data TX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_tx[i]);
@@ -705,32 +704,12 @@ static void dump_regs(struct ucc_geth_private *ugeth)
}
}
if (ugeth->p_thread_data_rx) {
- int numThreadsRxNumerical;
- switch (ugeth->ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
- numThreadsRxNumerical = 0;
- break;
- }
+ int count = ucc_geth_thread_count(ugeth->ug_info->numThreadsRx);
pr_info("Thread data RX:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_thread_data_rx);
- for (i = 0; i < numThreadsRxNumerical; i++) {
+ for (i = 0; i < count; i++) {
pr_info("Thread data RX[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_thread_data_rx[i]);
@@ -905,7 +884,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_send_q_mem_reg) {
pr_info("Send Q memory registers:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
pr_info("SQQD[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
@@ -937,7 +916,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
pr_info("RX IRQ coalescing tables:\n");
pr_info("Base address: 0x%08x\n",
(u32)ugeth->p_rx_irq_coalescing_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX IRQ coalescing table entry[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_irq_coalescing_tbl->
@@ -959,7 +938,7 @@ static void dump_regs(struct ucc_geth_private *ugeth)
if (ugeth->p_rx_bd_qs_tbl) {
pr_info("RX BD QS tables:\n");
pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
pr_info("RX BD QS table[%d]:\n", i);
pr_info("Base address: 0x%08x\n",
(u32)&ugeth->p_rx_bd_qs_tbl[i]);
@@ -1835,7 +1814,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ugeth->ug_info); i++) {
if (ugeth->p_rx_bd_ring[i]) {
/* Return existing data buffers in ring */
bd = ugeth->p_rx_bd_ring[i];
@@ -1856,12 +1835,7 @@ static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
kfree(ugeth->rx_skbuff[i]);
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->rx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->rx_bd_ring_offset[i]);
+ kfree(ugeth->p_rx_bd_ring[i]);
ugeth->p_rx_bd_ring[i] = NULL;
}
}
@@ -1880,7 +1854,7 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ugeth->ug_info); i++) {
bd = ugeth->p_tx_bd_ring[i];
if (!bd)
continue;
@@ -1898,15 +1872,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
kfree(ugeth->tx_skbuff[i]);
- if (ugeth->p_tx_bd_ring[i]) {
- if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_SYSTEM)
- kfree((void *)ugeth->tx_bd_ring_offset[i]);
- else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM)
- qe_muram_free(ugeth->tx_bd_ring_offset[i]);
- ugeth->p_tx_bd_ring[i] = NULL;
- }
+ kfree(ugeth->p_tx_bd_ring[i]);
+ ugeth->p_tx_bd_ring[i] = NULL;
}
}
@@ -1921,50 +1888,39 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
ugeth->uccf = NULL;
}
- if (ugeth->p_thread_data_tx) {
- qe_muram_free(ugeth->thread_dat_tx_offset);
- ugeth->p_thread_data_tx = NULL;
- }
- if (ugeth->p_thread_data_rx) {
- qe_muram_free(ugeth->thread_dat_rx_offset);
- ugeth->p_thread_data_rx = NULL;
- }
- if (ugeth->p_exf_glbl_param) {
- qe_muram_free(ugeth->exf_glbl_param_offset);
- ugeth->p_exf_glbl_param = NULL;
- }
- if (ugeth->p_rx_glbl_pram) {
- qe_muram_free(ugeth->rx_glbl_pram_offset);
- ugeth->p_rx_glbl_pram = NULL;
- }
- if (ugeth->p_tx_glbl_pram) {
- qe_muram_free(ugeth->tx_glbl_pram_offset);
- ugeth->p_tx_glbl_pram = NULL;
- }
- if (ugeth->p_send_q_mem_reg) {
- qe_muram_free(ugeth->send_q_mem_reg_offset);
- ugeth->p_send_q_mem_reg = NULL;
- }
- if (ugeth->p_scheduler) {
- qe_muram_free(ugeth->scheduler_offset);
- ugeth->p_scheduler = NULL;
- }
- if (ugeth->p_tx_fw_statistics_pram) {
- qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
- ugeth->p_tx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_fw_statistics_pram) {
- qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
- ugeth->p_rx_fw_statistics_pram = NULL;
- }
- if (ugeth->p_rx_irq_coalescing_tbl) {
- qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
- ugeth->p_rx_irq_coalescing_tbl = NULL;
- }
- if (ugeth->p_rx_bd_qs_tbl) {
- qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
- ugeth->p_rx_bd_qs_tbl = NULL;
- }
+ qe_muram_free_addr(ugeth->p_thread_data_tx);
+ ugeth->p_thread_data_tx = NULL;
+
+ qe_muram_free_addr(ugeth->p_thread_data_rx);
+ ugeth->p_thread_data_rx = NULL;
+
+ qe_muram_free_addr(ugeth->p_exf_glbl_param);
+ ugeth->p_exf_glbl_param = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_glbl_pram);
+ ugeth->p_rx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_glbl_pram);
+ ugeth->p_tx_glbl_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_send_q_mem_reg);
+ ugeth->p_send_q_mem_reg = NULL;
+
+ qe_muram_free_addr(ugeth->p_scheduler);
+ ugeth->p_scheduler = NULL;
+
+ qe_muram_free_addr(ugeth->p_tx_fw_statistics_pram);
+ ugeth->p_tx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_fw_statistics_pram);
+ ugeth->p_rx_fw_statistics_pram = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_irq_coalescing_tbl);
+ ugeth->p_rx_irq_coalescing_tbl = NULL;
+
+ qe_muram_free_addr(ugeth->p_rx_bd_qs_tbl);
+ ugeth->p_rx_bd_qs_tbl = NULL;
+
if (ugeth->p_init_enet_param_shadow) {
return_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -2073,15 +2029,8 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
- if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
- (uf_info->bd_mem_part == MEM_PART_MURAM))) {
- if (netif_msg_probe(ugeth))
- pr_err("Bad memory partition value\n");
- return -EINVAL;
- }
-
/* Rx BD lengths */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
(ug_info->bdRingLenRx[i] %
UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
@@ -2092,7 +2041,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* Tx BD lengths */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
if (netif_msg_probe(ugeth))
pr_err("Tx BD ring length must be no smaller than 2\n");
@@ -2109,14 +2058,14 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
}
/* num Tx queues */
- if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
+ if (ucc_geth_tx_queues(ug_info) > NUM_TX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of tx queues too large\n");
return -EINVAL;
}
/* num Rx queues */
- if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
+ if (ucc_geth_rx_queues(ug_info) > NUM_RX_QUEUES) {
if (netif_msg_probe(ugeth))
pr_err("number of rx queues too large\n");
return -EINVAL;
@@ -2124,7 +2073,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l2qt */
for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
- if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l2qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("VLAN priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2133,7 +2082,7 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* l3qt */
for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
- if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
+ if (ug_info->l3qt[i] >= ucc_geth_rx_queues(ug_info)) {
if (netif_msg_probe(ugeth))
pr_err("IP priority table entry must not be larger than number of Rx queues\n");
return -EINVAL;
@@ -2156,10 +2105,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
/* Generate uccm_mask for receive */
uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i);
/* Initialize the general fast UCC block. */
if (ucc_fast_init(uf_info, &ugeth->uccf)) {
@@ -2198,53 +2147,32 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
- /* Allocate in multiple of
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
- according to spec */
- length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd))
- / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) %
- UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
- length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_TX_BD_RING_ALIGNMENT;
- ugeth->tx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
-
- if (ugeth->tx_bd_ring_offset[j] != 0)
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->tx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_TX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
- ugeth->p_tx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- tx_bd_ring_offset[j]);
- }
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
+ u32 align = max(UCC_GETH_TX_BD_RING_ALIGNMENT,
+ UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT);
+ u32 alloc;
+
+ length = ug_info->bdRingLenTx[j] * sizeof(struct qe_bd);
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_tx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
+
if (!ugeth->p_tx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Tx bd rings\n");
return -ENOMEM;
}
/* Zero unused end of bd ring, according to spec */
- memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
- ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0,
- length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd));
+ memset(ugeth->p_tx_bd_ring[j] + length, 0, alloc - length);
}
/* Init Tx bds */
- for (j = 0; j < ug_info->numQueuesTx; j++) {
+ for (j = 0; j < ucc_geth_tx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->tx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenTx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenTx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->tx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2252,9 +2180,6 @@ static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
- ugeth->tx_skbuff[j][i] = NULL;
-
ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
@@ -2284,27 +2209,15 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
uf_info = &ug_info->uf_info;
/* Allocate Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
+ u32 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
+ u32 alloc;
+
length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);
- if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
- u32 align = 4;
- if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
- align = UCC_GETH_RX_BD_RING_ALIGNMENT;
- ugeth->rx_bd_ring_offset[j] =
- (u32) kmalloc((u32) (length + align), GFP_KERNEL);
- if (ugeth->rx_bd_ring_offset[j] != 0)
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
- align) & ~(align - 1));
- } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
- ugeth->rx_bd_ring_offset[j] =
- qe_muram_alloc(length,
- UCC_GETH_RX_BD_RING_ALIGNMENT);
- if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
- ugeth->p_rx_bd_ring[j] =
- (u8 __iomem *) qe_muram_addr(ugeth->
- rx_bd_ring_offset[j]);
- }
+ alloc = round_up(length, align);
+ alloc = roundup_pow_of_two(alloc);
+
+ ugeth->p_rx_bd_ring[j] = kmalloc(alloc, GFP_KERNEL);
if (!ugeth->p_rx_bd_ring[j]) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for Rx bd rings\n");
@@ -2313,11 +2226,11 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
}
/* Init Rx bds */
- for (j = 0; j < ug_info->numQueuesRx; j++) {
+ for (j = 0; j < ucc_geth_rx_queues(ug_info); j++) {
/* Setup the skbuff rings */
ugeth->rx_skbuff[j] =
- kmalloc_array(ugeth->ug_info->bdRingLenRx[j],
- sizeof(struct sk_buff *), GFP_KERNEL);
+ kcalloc(ugeth->ug_info->bdRingLenRx[j],
+ sizeof(struct sk_buff *), GFP_KERNEL);
if (ugeth->rx_skbuff[j] == NULL) {
if (netif_msg_ifup(ugeth))
@@ -2325,9 +2238,6 @@ static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
return -ENOMEM;
}
- for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
- ugeth->rx_skbuff[j][i] = NULL;
-
ugeth->skb_currx[j] = 0;
bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
@@ -2359,10 +2269,10 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
u32 init_enet_pram_offset, cecr_subblock, command;
u32 ifstat, i, j, size, l2qt, l3qt;
u16 temoder = UCC_GETH_TEMODER_INIT;
- u16 test;
u8 function_code = 0;
u8 __iomem *endOfRing;
u8 numThreadsRxNumerical, numThreadsTxNumerical;
+ s32 rx_glbl_pram_offset, tx_glbl_pram_offset;
ugeth_vdbg("%s: IN", __func__);
uccf = ugeth->uccf;
@@ -2371,45 +2281,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
uf_regs = uccf->uf_regs;
ug_regs = ugeth->ug_regs;
- switch (ug_info->numThreadsRx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsRxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsRxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsRxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsRxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsRxNumerical = 8;
- break;
- default:
+ numThreadsRxNumerical = ucc_geth_thread_count(ug_info->numThreadsRx);
+ if (!numThreadsRxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Rx threads value\n");
return -EINVAL;
}
- switch (ug_info->numThreadsTx) {
- case UCC_GETH_NUM_OF_THREADS_1:
- numThreadsTxNumerical = 1;
- break;
- case UCC_GETH_NUM_OF_THREADS_2:
- numThreadsTxNumerical = 2;
- break;
- case UCC_GETH_NUM_OF_THREADS_4:
- numThreadsTxNumerical = 4;
- break;
- case UCC_GETH_NUM_OF_THREADS_6:
- numThreadsTxNumerical = 6;
- break;
- case UCC_GETH_NUM_OF_THREADS_8:
- numThreadsTxNumerical = 8;
- break;
- default:
+ numThreadsTxNumerical = ucc_geth_thread_count(ug_info->numThreadsTx);
+ if (!numThreadsTxNumerical) {
if (netif_msg_ifup(ugeth))
pr_err("Bad number of Tx threads value\n");
return -EINVAL;
@@ -2507,20 +2387,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
*/
/* Tx global PRAM */
/* Allocate global tx parameter RAM page */
- ugeth->tx_glbl_pram_offset =
+ tx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram),
UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
+ if (tx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_tx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_tx_glbl_pram =
- (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
- tx_glbl_pram_offset);
- /* Zero out p_tx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
-
+ ugeth->p_tx_glbl_pram = qe_muram_addr(tx_glbl_pram_offset);
/* Fill global PRAM */
/* TQPTR */
@@ -2554,7 +2429,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* SQPTR */
/* Size varies with number of Tx queues */
ugeth->send_q_mem_reg_offset =
- qe_muram_alloc(ug_info->numQueuesTx *
+ qe_muram_alloc(ucc_geth_tx_queues(ug_info) *
sizeof(struct ucc_geth_send_queue_qd),
UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
@@ -2570,29 +2445,20 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesTx; i++) {
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++) {
endOfRing =
ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
1) * sizeof(struct qe_bd);
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32) virt_to_phys(endOfRing));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
- (u32)qe_muram_dma(ugeth->p_tx_bd_ring[i]));
- out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
- last_bd_completed_address,
- (u32)qe_muram_dma(endOfRing));
- }
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
+ (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
+ out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
+ last_bd_completed_address,
+ (u32) virt_to_phys(endOfRing));
}
/* schedulerbasepointer */
- if (ug_info->numQueuesTx > 1) {
+ if (ucc_geth_tx_queues(ug_info) > 1) {
/* scheduler exists only if more than 1 tx queue */
ugeth->scheduler_offset =
qe_muram_alloc(sizeof(struct ucc_geth_scheduler),
@@ -2608,8 +2474,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
scheduler_offset);
out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
ugeth->scheduler_offset);
- /* Zero out p_scheduler */
- memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
/* Set values in scheduler */
out_be32(&ugeth->p_scheduler->mblinterval,
@@ -2652,23 +2516,18 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_tx_fw_statistics_pram =
(struct ucc_geth_tx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
- /* Zero out p_tx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
- 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram));
}
/* temoder */
/* Already has speed set */
- if (ug_info->numQueuesTx > 1)
+ if (ucc_geth_tx_queues(ug_info) > 1)
temoder |= TEMODER_SCHEDULER_ENABLE;
if (ug_info->ipCheckSumGenerate)
temoder |= TEMODER_IP_CHECKSUM_GENERATE;
- temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
+ temoder |= ((ucc_geth_tx_queues(ug_info) - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
- test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
-
/* Function code register value to be used later */
function_code = UCC_BMR_BO_BE | UCC_BMR_GBL;
/* Required for QE */
@@ -2678,20 +2537,15 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* Rx global PRAM */
/* Allocate global rx parameter RAM page */
- ugeth->rx_glbl_pram_offset =
+ rx_glbl_pram_offset =
qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram),
UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
- if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
+ if (rx_glbl_pram_offset < 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate DPRAM memory for p_rx_glbl_pram\n");
return -ENOMEM;
}
- ugeth->p_rx_glbl_pram =
- (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
- rx_glbl_pram_offset);
- /* Zero out p_rx_glbl_pram */
- memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
-
+ ugeth->p_rx_glbl_pram = qe_muram_addr(rx_glbl_pram_offset);
/* Fill global PRAM */
/* RQPTR */
@@ -2729,16 +2583,13 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->p_rx_fw_statistics_pram =
(struct ucc_geth_rx_firmware_statistics_pram __iomem *)
qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
- /* Zero out p_rx_fw_statistics_pram */
- memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
- sizeof(struct ucc_geth_rx_firmware_statistics_pram));
}
/* intCoalescingPtr */
/* Size varies with number of Rx queues */
ugeth->rx_irq_coalescing_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
sizeof(struct ucc_geth_rx_interrupt_coalescing_entry)
+ 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
@@ -2754,7 +2605,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ugeth->rx_irq_coalescing_tbl_offset);
/* Fill interrupt coalescing table */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
interruptcoalescingmaxvalue,
ug_info->interruptcoalescingmaxvalue[i]);
@@ -2803,7 +2654,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
/* RBDQPTR */
/* Size varies with number of Rx queues */
ugeth->rx_bd_qs_tbl_offset =
- qe_muram_alloc(ug_info->numQueuesRx *
+ qe_muram_alloc(ucc_geth_rx_queues(ug_info) *
(sizeof(struct ucc_geth_rx_bd_queues_entry) +
sizeof(struct ucc_geth_rx_prefetched_bds)),
UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
@@ -2817,23 +2668,12 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
(struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
rx_bd_qs_tbl_offset);
out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
- /* Zero out p_rx_bd_qs_tbl */
- memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
- 0,
- ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) +
- sizeof(struct ucc_geth_rx_prefetched_bds)));
/* Setup the table */
/* Assume BD rings are already established */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
- if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
- } else if (ugeth->ug_info->uf_info.bd_mem_part ==
- MEM_PART_MURAM) {
- out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
- (u32)qe_muram_dma(ugeth->p_rx_bd_ring[i]));
- }
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
+ out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
+ (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
/* rest of fields handled by QE */
}
@@ -2854,7 +2694,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
ug_info->
vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
- remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
+ remoder |= ((ucc_geth_rx_queues(ug_info) - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
if (ug_info->ipCheckSumCheck)
remoder |= REMODER_IP_CHECKSUM_CHECK;
if (ug_info->ipAddressAlignment)
@@ -2937,14 +2777,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
* allocated resources can be released when the channel is freed.
*/
if (!(ugeth->p_init_enet_param_shadow =
- kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
+ kzalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) {
if (netif_msg_ifup(ugeth))
pr_err("Can not allocate memory for p_UccInitEnetParamShadows\n");
return -ENOMEM;
}
- /* Zero out *p_init_enet_param_shadow */
- memset((char *)ugeth->p_init_enet_param_shadow,
- 0, sizeof(struct ucc_geth_init_pram));
/* Fill shadow InitEnet command parameter structure */
@@ -2964,7 +2801,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
- ugeth->rx_glbl_pram_offset | ug_info->riscRx;
+ rx_glbl_pram_offset | ug_info->riscRx;
if ((ug_info->largestexternallookupkeysize !=
QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) &&
(ug_info->largestexternallookupkeysize !=
@@ -3002,7 +2839,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
ugeth->p_init_enet_param_shadow->txglobal =
- ugeth->tx_glbl_pram_offset | ug_info->riscTx;
+ tx_glbl_pram_offset | ug_info->riscTx;
if ((ret_val =
fill_init_enet_entries(ugeth,
&(ugeth->p_init_enet_param_shadow->
@@ -3016,7 +2853,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
}
/* Load Rx bds with buffers */
- for (i = 0; i < ug_info->numQueuesRx; i++) {
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++) {
if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
if (netif_msg_ifup(ugeth))
pr_err("Can not fill Rx bds with buffers\n");
@@ -3287,12 +3124,12 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
/* Tx event processing */
spin_lock(&ugeth->lock);
- for (i = 0; i < ug_info->numQueuesTx; i++)
+ for (i = 0; i < ucc_geth_tx_queues(ug_info); i++)
ucc_geth_tx(ugeth->ndev, i);
spin_unlock(&ugeth->lock);
howmany = 0;
- for (i = 0; i < ug_info->numQueuesRx; i++)
+ for (i = 0; i < ucc_geth_rx_queues(ug_info); i++)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
@@ -3685,6 +3522,36 @@ static const struct net_device_ops ucc_geth_netdev_ops = {
#endif
};
+static int ucc_geth_parse_clock(struct device_node *np, const char *which,
+ enum qe_clock *out)
+{
+ const char *sprop;
+ char buf[24];
+
+ snprintf(buf, sizeof(buf), "%s-clock-name", which);
+ sprop = of_get_property(np, buf, NULL);
+ if (sprop) {
+ *out = qe_clock_source(sprop);
+ } else {
+ u32 val;
+
+ snprintf(buf, sizeof(buf), "%s-clock", which);
+ if (of_property_read_u32(np, buf, &val)) {
+ /* If both *-clock-name and *-clock are missing,
+ * we want to tell people to use *-clock-name.
+ */
+ pr_err("missing %s-clock-name property\n", buf);
+ return -EINVAL;
+ }
+ *out = val;
+ }
+ if (*out < QE_CLK_NONE || *out > QE_CLK24) {
+ pr_err("invalid %s property\n", buf);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int ucc_geth_probe(struct platform_device* ofdev)
{
struct device *device = &ofdev->dev;
@@ -3695,7 +3562,6 @@ static int ucc_geth_probe(struct platform_device* ofdev)
struct resource res;
int err, ucc_num, max_speed = 0;
const unsigned int *prop;
- const char *sprop;
const void *mac_addr;
phy_interface_t phy_interface;
static const int enet_to_speed[] = {
@@ -3725,62 +3591,23 @@ static int ucc_geth_probe(struct platform_device* ofdev)
if ((ucc_num < 0) || (ucc_num > 7))
return -ENODEV;
- ug_info = &ugeth_info[ucc_num];
- if (ug_info == NULL) {
- if (netif_msg_probe(&debug))
- pr_err("[%d] Missing additional data!\n", ucc_num);
- return -ENODEV;
- }
+ ug_info = kmalloc(sizeof(*ug_info), GFP_KERNEL);
+ if (ug_info == NULL)
+ return -ENOMEM;
+ memcpy(ug_info, &ugeth_primary_info, sizeof(*ug_info));
ug_info->uf_info.ucc_num = ucc_num;
- sprop = of_get_property(np, "rx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.rx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.rx_clock > QE_CLK24)) {
- pr_err("invalid rx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "rx-clock", NULL);
- if (!prop) {
- /* If both rx-clock-name and rx-clock are missing,
- we want to tell people to use rx-clock-name. */
- pr_err("missing rx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid rx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.rx_clock = *prop;
- }
-
- sprop = of_get_property(np, "tx-clock-name", NULL);
- if (sprop) {
- ug_info->uf_info.tx_clock = qe_clock_source(sprop);
- if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) ||
- (ug_info->uf_info.tx_clock > QE_CLK24)) {
- pr_err("invalid tx-clock-name property\n");
- return -EINVAL;
- }
- } else {
- prop = of_get_property(np, "tx-clock", NULL);
- if (!prop) {
- pr_err("missing tx-clock-name property\n");
- return -EINVAL;
- }
- if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) {
- pr_err("invalid tx-clock property\n");
- return -EINVAL;
- }
- ug_info->uf_info.tx_clock = *prop;
- }
+ err = ucc_geth_parse_clock(np, "rx", &ug_info->uf_info.rx_clock);
+ if (err)
+ goto err_free_info;
+ err = ucc_geth_parse_clock(np, "tx", &ug_info->uf_info.tx_clock);
+ if (err)
+ goto err_free_info;
err = of_address_to_resource(np, 0, &res);
if (err)
- return -EINVAL;
+ goto err_free_info;
ug_info->uf_info.regs = res.start;
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
@@ -3793,7 +3620,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
*/
err = of_phy_register_fixed_link(np);
if (err)
- return err;
+ goto err_free_info;
ug_info->phy_node = of_node_get(np);
}
@@ -3924,6 +3751,8 @@ err_deregister_fixed_link:
of_phy_deregister_fixed_link(np);
of_node_put(ug_info->tbi_node);
of_node_put(ug_info->phy_node);
+err_free_info:
+ kfree(ug_info);
return err;
}
@@ -3940,6 +3769,7 @@ static int ucc_geth_remove(struct platform_device* ofdev)
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ kfree(ugeth->ug_info);
free_netdev(dev);
return 0;
@@ -3968,17 +3798,10 @@ static struct platform_driver ucc_geth_driver = {
static int __init ucc_geth_init(void)
{
- int i, ret;
-
if (netif_msg_drv(&debug))
pr_info(DRV_DESC "\n");
- for (i = 0; i < 8; i++)
- memcpy(&(ugeth_info[i]), &ugeth_primary_info,
- sizeof(ugeth_primary_info));
-
- ret = platform_driver_register(&ucc_geth_driver);
- return ret;
+ return platform_driver_register(&ucc_geth_driver);
}
static void __exit ucc_geth_exit(void)
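The reworked Tx/Rx BD ring allocation above sizes the buffer with round_up() followed by roundup_pow_of_two() and then zeroes the unused tail. Below is a minimal userspace sketch of just that size arithmetic; the two helpers are local re-implementations with the usual power-of-two semantics of the kernel macros, and the ring length, descriptor size and alignment are invented example values, not numbers taken from the driver.

#include <stdio.h>
#include <stdint.h>

/* Local stand-ins for the kernel helpers used by the new allocation path. */
static uint32_t round_up_to(uint32_t x, uint32_t align)	/* align must be a power of two */
{
	return (x + align - 1) & ~(align - 1);
}

static uint32_t roundup_pow_of_two_u32(uint32_t x)	/* returns 1 for x == 0 */
{
	uint32_t p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	/* Hypothetical example values, not taken from the driver. */
	uint32_t bd_ring_len = 24;	/* number of descriptors */
	uint32_t bd_size = 8;		/* assumed sizeof(struct qe_bd) */
	uint32_t align = 32;		/* BD ring alignment */
	uint32_t length = bd_ring_len * bd_size;
	uint32_t alloc = roundup_pow_of_two_u32(round_up_to(length, align));

	printf("length=%u -> alloc=%u (unused tail of %u bytes is zeroed)\n",
	       length, alloc, alloc - length);
	return 0;
}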
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 11d4bf5dc21f..4294ed096ebb 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1076,8 +1076,6 @@ struct ucc_geth_tad_params {
/* GETH protocol initialization structure */
struct ucc_geth_info {
struct ucc_fast_info uf_info;
- u8 numQueuesTx;
- u8 numQueuesRx;
int ipCheckSumCheck;
int ipCheckSumGenerate;
int rxExtendedFiltering;
@@ -1165,9 +1163,7 @@ struct ucc_geth_private {
struct ucc_geth_exf_global_pram __iomem *p_exf_glbl_param;
u32 exf_glbl_param_offset;
struct ucc_geth_rx_global_pram __iomem *p_rx_glbl_pram;
- u32 rx_glbl_pram_offset;
struct ucc_geth_tx_global_pram __iomem *p_tx_glbl_pram;
- u32 tx_glbl_pram_offset;
struct ucc_geth_send_queue_mem_region __iomem *p_send_q_mem_reg;
u32 send_q_mem_reg_offset;
struct ucc_geth_thread_data_tx __iomem *p_thread_data_tx;
@@ -1185,9 +1181,7 @@ struct ucc_geth_private {
struct ucc_geth_rx_bd_queues_entry __iomem *p_rx_bd_qs_tbl;
u32 rx_bd_qs_tbl_offset;
u8 __iomem *p_tx_bd_ring[NUM_TX_QUEUES];
- u32 tx_bd_ring_offset[NUM_TX_QUEUES];
u8 __iomem *p_rx_bd_ring[NUM_RX_QUEUES];
- u32 rx_bd_ring_offset[NUM_RX_QUEUES];
u8 __iomem *confBd[NUM_TX_QUEUES];
u8 __iomem *txBd[NUM_TX_QUEUES];
u8 __iomem *rxBd[NUM_RX_QUEUES];
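The fields dropped from ucc_geth_info and ucc_geth_private here are the counterpart of the ucc_geth_tx_queues()/ucc_geth_rx_queues() accessors used throughout the .c hunks; the accessor bodies are not part of this excerpt. The sketch below only illustrates the general pattern of hiding a queue count behind helpers, with invented names, so that a later change of representation touches one place.

#include <stdio.h>

/* Invented stand-in for the driver's info structure; the real accessors
 * and their backing storage are defined elsewhere in the patch. */
struct demo_info {
	unsigned char num_tx_queues;
	unsigned char num_rx_queues;
};

/* Call sites go through accessors instead of reading struct fields, so a
 * later change in how the counts are stored only touches these helpers. */
static unsigned int demo_tx_queues(const struct demo_info *info)
{
	return info->num_tx_queues;
}

static unsigned int demo_rx_queues(const struct demo_info *info)
{
	return info->num_rx_queues;
}

int main(void)
{
	struct demo_info info = { .num_tx_queues = 1, .num_rx_queues = 1 };
	unsigned int i;

	for (i = 0; i < demo_tx_queues(&info); i++)
		printf("init tx ring %u\n", i);
	for (i = 0; i < demo_rx_queues(&info); i++)
		printf("init rx ring %u\n", i);
	return 0;
}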
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 858cb293152a..5d7824d2b4d4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1502,7 +1502,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- assert(skb->queue_mapping < ndev->ae_handle->q_num);
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
return hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a7daf6d4511e..e9e60a935f40 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -272,7 +272,7 @@ struct hnae3_ring_chain_node {
};
#define HNAE3_IS_TX_RING(node) \
- (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
+ (((node)->flag & 1 << HNAE3_RING_TYPE_B) == HNAE3_RING_TYPE_TX)
/* device specification info from firmware */
struct hnae3_dev_specs {
@@ -284,13 +284,14 @@ struct hnae3_dev_specs {
u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
u16 max_int_gl; /* max value of interrupt coalesce based on INT_GL */
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+ u16 max_frm_size;
+ u16 max_qset_num;
};
struct hnae3_client_ops {
int (*init_instance)(struct hnae3_handle *handle);
void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
void (*link_status_change)(struct hnae3_handle *handle, bool state);
- int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
void (*process_hw_error)(struct hnae3_handle *handle,
@@ -410,8 +411,6 @@ struct hnae3_ae_dev {
* Get the len of the regs dump
* get_rss_key_size()
* Get rss key size
- * get_rss_indir_size()
- * Get rss indirection table size
* get_rss()
* Get rss table
* set_rss()
@@ -465,6 +464,8 @@ struct hnae3_ae_dev {
* Delete clsflower rule
* cls_flower_active
* Check if any cls flower rule exist
+ * dbg_read_cmd
+ * Execute debugfs read command.
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -553,7 +554,6 @@ struct hnae3_ae_ops {
int (*get_regs_len)(struct hnae3_handle *handle);
u32 (*get_rss_key_size)(struct hnae3_handle *handle);
- u32 (*get_rss_indir_size)(struct hnae3_handle *handle);
int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key,
u8 *hfunc);
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
@@ -620,6 +620,8 @@ struct hnae3_ae_ops {
int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys);
int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
+ int (*dbg_read_cmd)(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len);
pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
@@ -773,9 +775,13 @@ struct hnae3_handle {
#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
#define hnae3_set_bit(origin, shift, val) \
- hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+ hnae3_set_field(origin, 0x1 << (shift), shift, val)
#define hnae3_get_bit(origin, shift) \
- hnae3_get_field((origin), (0x1 << (shift)), (shift))
+ hnae3_get_field(origin, 0x1 << (shift), shift)
+
+#define HNAE3_DBG_TM_NODES "tm_nodes"
+#define HNAE3_DBG_TM_PRI "tm_priority"
+#define HNAE3_DBG_TM_QSET "tm_qset"
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
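The hnae3_ae_ops change above adds dbg_read_cmd as another optional hook that callers probe for NULL before using. A small standalone C sketch of that optional-op dispatch follows; the struct and function names are invented for illustration and only the NULL-check pattern mirrors the driver.

#include <stdio.h>
#include <errno.h>

/* Invented miniature of an ae_ops-style vtable: newer hooks are added as
 * optional function pointers and callers check for NULL before use. */
struct demo_ops {
	int (*run_cmd)(const char *cmd);
	int (*read_cmd)(const char *cmd, char *buf, int len);	/* newer, optional */
};

static int demo_read_cmd(const char *cmd, char *buf, int len)
{
	return snprintf(buf, len, "result of '%s'\n", cmd) < 0 ? -EIO : 0;
}

static int dispatch_read(const struct demo_ops *ops, const char *cmd,
			 char *buf, int len)
{
	if (!ops->read_cmd)	/* backend predates the new hook */
		return -EOPNOTSUPP;
	return ops->read_cmd(cmd, buf, len);
}

int main(void)
{
	struct demo_ops old_backend = { 0 };
	struct demo_ops new_backend = { .read_cmd = demo_read_cmd };
	char buf[64];

	printf("old backend: %d\n",
	       dispatch_read(&old_backend, "tm_qset", buf, sizeof(buf)));
	if (!dispatch_read(&new_backend, "tm_qset", buf, sizeof(buf)))
		printf("new backend: %s", buf);
	return 0;
}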
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 9d4e9c053a8f..dd11c57027bb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -7,7 +7,7 @@
#include "hnae3.h"
#include "hns3_enet.h"
-#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_READ_LEN 65536
#define HNS3_DBG_WRITE_LEN 1024
static struct dentry *hns3_dbgfs_root;
@@ -162,7 +162,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
continue;
dev_info(&h->pdev->dev,
- " %4d %4d %4d\n",
+ " %4d %4u %4d\n",
i, global_qid, priv->ring[i].tqp_vector->vector_irq);
}
@@ -389,6 +389,9 @@ static void hns3_dbg_dev_specs(struct hnae3_handle *h)
kinfo->tc_info.num_tc);
dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
dev_info(priv->dev, "MAX INT GL: %u\n", dev_specs->max_int_gl);
+ dev_info(priv->dev, "MAX frame size: %u\n", dev_specs->max_frm_size);
+ dev_info(priv->dev, "MAX TM RATE: %uMbps\n", dev_specs->max_tm_rate);
+ dev_info(priv->dev, "MAX QSET number: %u\n", dev_specs->max_qset_num);
}
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
@@ -420,6 +423,30 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
return (*ppos = len);
}
+static int hns3_dbg_check_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+ int ret = 0;
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "queue map", 9) == 0)
+ ret = hns3_dbg_queue_map(handle);
+ else if (strncmp(cmd_buf, "bd info", 7) == 0)
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+ hns3_dbg_dev_caps(handle);
+ else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+ hns3_dbg_dev_specs(handle);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+ else
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -427,7 +454,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
struct hns3_nic_priv *priv = handle->priv;
char *cmd_buf, *cmd_buf_tmp;
int uncopied_bytes;
- int ret = 0;
+ int ret;
if (*ppos != 0)
return 0;
@@ -458,23 +485,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
count = cmd_buf_tmp - cmd_buf + 1;
}
- if (strncmp(cmd_buf, "help", 4) == 0)
- hns3_dbg_help(handle);
- else if (strncmp(cmd_buf, "queue info", 10) == 0)
- ret = hns3_dbg_queue_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "queue map", 9) == 0)
- ret = hns3_dbg_queue_map(handle);
- else if (strncmp(cmd_buf, "bd info", 7) == 0)
- ret = hns3_dbg_bd_info(handle, cmd_buf);
- else if (strncmp(cmd_buf, "dev capability", 14) == 0)
- hns3_dbg_dev_caps(handle);
- else if (strncmp(cmd_buf, "dev spec", 8) == 0)
- hns3_dbg_dev_specs(handle);
- else if (handle->ae_algo->ops->dbg_run_cmd)
- ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
- else
- ret = -EOPNOTSUPP;
-
+ ret = hns3_dbg_check_cmd(handle, cmd_buf);
if (ret)
hns3_dbg_help(handle);
@@ -484,6 +495,39 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
return count;
}
+static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hnae3_handle *handle = filp->private_data;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_nic_priv *priv = handle->priv;
+ char *cmd_buf, *read_buf;
+ ssize_t size = 0;
+ int ret = 0;
+
+ read_buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+ if (!read_buf)
+ return -ENOMEM;
+
+ cmd_buf = filp->f_path.dentry->d_iname;
+
+ if (ops->dbg_read_cmd)
+ ret = ops->dbg_read_cmd(handle, cmd_buf, read_buf,
+ HNS3_DBG_READ_LEN);
+
+ if (ret) {
+ dev_info(priv->dev, "unknown command\n");
+ goto out;
+ }
+
+ size = simple_read_from_buffer(buffer, count, ppos, read_buf,
+ strlen(read_buf));
+
+out:
+ kfree(read_buf);
+ return size;
+}
+
static const struct file_operations hns3_dbg_cmd_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -491,14 +535,31 @@ static const struct file_operations hns3_dbg_cmd_fops = {
.write = hns3_dbg_cmd_write,
};
+static const struct file_operations hns3_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_read,
+};
+
void hns3_dbg_init(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const char *name = pci_name(handle->pdev);
+ struct dentry *entry_dir;
handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
&hns3_dbg_cmd_fops);
+
+ entry_dir = debugfs_create_dir("tm", handle->hnae3_dbgfs);
+ if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2)
+ debugfs_create_file(HNAE3_DBG_TM_NODES, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
+ debugfs_create_file(HNAE3_DBG_TM_PRI, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
+ debugfs_create_file(HNAE3_DBG_TM_QSET, 0600, entry_dir, handle,
+ &hns3_dbg_fops);
}
void hns3_dbg_uninit(struct hnae3_handle *handle)
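The new hns3_dbg_read() path pairs debugfs_create_file() with simple_open/simple_read_from_buffer() and keys the dump on the debugfs file's own name (d_iname). The following is a hedged sketch of the same pattern as a tiny standalone module, not code from this driver; it is meant to show the API shape rather than any hns3 behaviour, and all names are invented.

/* demo_debugfs.c - standalone sketch of a read-only debugfs file */
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/fs.h>

static struct dentry *demo_root;

static ssize_t demo_read(struct file *filp, char __user *buffer,
			 size_t count, loff_t *ppos)
{
	/* The file's own name selects what to dump, as in the hns3 code. */
	const char *name = (const char *)filp->f_path.dentry->d_iname;
	char buf[64];
	int len;

	len = scnprintf(buf, sizeof(buf), "contents for %s\n", name);
	return simple_read_from_buffer(buffer, count, ppos, buf, len);
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = demo_read,
};

static int __init demo_init(void)
{
	demo_root = debugfs_create_dir("demo_tm", NULL);
	debugfs_create_file("tm_nodes", 0400, demo_root, NULL, &demo_fops);
	debugfs_create_file("tm_qset", 0400, demo_root, NULL, &demo_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");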
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 405e49033417..bf4302a5cf95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -32,7 +32,7 @@
#define CREATE_TRACE_POINTS
#include "hns3_trace.h"
-#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+#define hns3_set_field(origin, shift, val) ((origin) |= (val) << (shift))
#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
#define hns3_rl_err(fmt, ...) \
@@ -1070,7 +1070,7 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
* HW checksum of the non-IP packets and GSO packets is handled at
* different place in the following code
*/
- if (skb->csum_not_inet || skb_is_gso(skb) ||
+ if (skb_csum_is_sctp(skb) || skb_is_gso(skb) ||
!test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state))
return false;
@@ -2329,7 +2329,7 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
pci_ers_result_t ret;
- dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
+ dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
@@ -2800,12 +2800,6 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
-static bool hns3_page_is_reusable(struct page *page)
-{
- return page_to_nid(page) == numa_mem_id() &&
- !page_is_pfmemalloc(page);
-}
-
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
@@ -2823,10 +2817,11 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
- /* Avoid re-using remote pages, or the stack is still using the page
- * when page_offset rollback to zero, flag default unreuse
+ /* Avoid re-using remote and pfmemalloc pages, or the stack is still
+ * using the page when page_offset rollback to zero, flag default
+ * unreuse
*/
- if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
+ if (!dev_page_is_reusable(desc_cb->priv) ||
(!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
__page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
return;
@@ -3083,8 +3078,8 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(hns3_page_is_reusable(desc_cb->priv)))
+ /* We can reuse buffer as-is, just make sure it is reusable */
+ if (dev_page_is_reusable(desc_cb->priv))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
__page_frag_cache_drain(desc_cb->priv,
@@ -4089,7 +4084,7 @@ out_when_alloc_ring_memory:
return -ENOMEM;
}
-int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+static void hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -4098,7 +4093,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
hns3_fini_ring(&priv->ring[i]);
hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
}
- return 0;
}
/* Set mac addr if it is configured. or leave it to the AE driver */
@@ -4286,8 +4280,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
hns3_dbg_init(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
- netdev->max_mtu = HNS3_MAX_MTU;
+ netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size);
if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps))
set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state);
@@ -4327,7 +4320,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
@@ -4353,9 +4345,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_nic_dealloc_vector_data(priv);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- netdev_err(netdev, "uninit ring error\n");
+ hns3_uninit_all_ring(priv);
hns3_put_ring_config(priv);
@@ -4384,20 +4374,6 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
}
}
-static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
-{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
- struct net_device *ndev = kinfo->netdev;
-
- if (tc > HNAE3_MAX_TC)
- return -EINVAL;
-
- if (!ndev)
- return -ENODEV;
-
- return hns3_nic_set_real_num_queue(ndev);
-}
-
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
@@ -4664,7 +4640,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int ret;
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
@@ -4682,13 +4657,11 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_dealloc_vector_data(priv);
- ret = hns3_uninit_all_ring(priv);
- if (ret)
- netdev_err(netdev, "uninit ring error\n");
+ hns3_uninit_all_ring(priv);
hns3_put_ring_config(priv);
- return ret;
+ return 0;
}
static int hns3_reset_notify(struct hnae3_handle *handle,
@@ -4834,7 +4807,6 @@ static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
- .setup_tc = hns3_client_setup_tc,
.reset_notify = hns3_reset_notify,
.process_hw_error = hns3_process_hw_error,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 0a7b606e7c93..d069b04ee587 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -56,9 +56,8 @@ enum hns3_nic_state {
#define HNS3_RING_MIN_PENDING 72
#define HNS3_RING_BD_MULTIPLE 8
/* max frame size of mac */
-#define HNS3_MAC_MAX_FRAME 9728
-#define HNS3_MAX_MTU \
- (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
+#define HNS3_MAX_MTU(max_frm_size) \
+ ((max_frm_size) - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
@@ -555,7 +554,7 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
}
#define hns3_read_dev(a, reg) \
- hns3_read_reg((a)->io_base, (reg))
+ hns3_read_reg((a)->io_base, reg)
static inline bool hns3_nic_resetting(struct net_device *netdev)
{
@@ -565,7 +564,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
}
#define hns3_write_dev(a, reg, value) \
- hns3_write_reg((a)->io_base, (reg), (value))
+ hns3_write_reg((a)->io_base, reg, value)
#define ring_to_dev(ring) ((ring)->dev)
@@ -589,15 +588,15 @@ static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
/* iterator for handling rings in ring group */
#define hns3_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+ for (pos = (head).ring; (pos); pos = (pos)->next)
#define hns3_get_handle(ndev) \
(((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
-#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_usec_to_reg(int_gl) ((int_gl) >> 1)
#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
-#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_usec_to_reg(int_rl) ((int_rl) >> 2)
#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
void hns3_ethtool_set_ops(struct net_device *netdev);
@@ -606,7 +605,6 @@ int hns3_set_channels(struct net_device *netdev,
void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
-int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
void hns3_fini_ring(struct hns3_enet_ring *ring);
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
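With the fixed HNS3_MAC_MAX_FRAME gone, HNS3_MAX_MTU() now derives the MTU from whatever maximum frame size the device reports, subtracting the Ethernet header, FCS and room for two VLAN tags. The runnable sketch below redoes that subtraction in plain C; the header-size constants are defined locally with their conventional values so it compiles without kernel headers.

#include <stdio.h>

/* Conventional Ethernet sizes, defined locally for a standalone build
 * (in the kernel they come from if_ether.h / if_vlan.h). */
#define DEMO_ETH_HLEN		14	/* dst MAC + src MAC + ethertype */
#define DEMO_ETH_FCS_LEN	4	/* frame check sequence */
#define DEMO_VLAN_HLEN		4	/* one 802.1Q tag */

static unsigned int demo_max_mtu(unsigned int max_frm_size)
{
	/* Same shape as the reworked HNS3_MAX_MTU(): subtract L2 overhead,
	 * including space for two stacked VLAN tags. */
	return max_frm_size -
	       (DEMO_ETH_HLEN + DEMO_ETH_FCS_LEN + 2 * DEMO_VLAN_HLEN);
}

int main(void)
{
	/* 9728 is the frame size the old fixed macro assumed; a device may
	 * now report a different value through its dev-specs query. */
	printf("max_frm_size 9728 -> MTU %u\n", demo_max_mtu(9728));
	printf("max_frm_size 1536 -> MTU %u\n", demo_max_mtu(1536));
	return 0;
}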
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index e2fc443fe92c..adcec4ea7cb9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -456,7 +456,7 @@ static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
data[ETH_GSTRING_LEN - 1] = '\0';
/* first, prepend the prefix string */
- n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%u_",
prefix, i);
size_left = (ETH_GSTRING_LEN - 1) - n1;
@@ -859,11 +859,9 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- if (!h->ae_algo->ops->get_rss_indir_size)
- return 0;
-
- return h->ae_algo->ops->get_rss_indir_size(h);
+ return ae_dev->dev_specs.rss_ind_tbl_size;
}
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index b728be4737f8..1bd0ddfaec4d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -189,38 +189,53 @@ static bool hclge_is_special_opcode(u16 opcode)
return false;
}
-static int hclge_cmd_convert_err_code(u16 desc_ret)
+struct errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
+static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num)
{
- switch (desc_ret) {
- case HCLGE_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGE_CMD_NO_AUTH:
- return -EPERM;
- case HCLGE_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGE_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGE_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGE_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGE_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGE_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGE_CMD_TIMEOUT:
- return -ETIME;
- case HCLGE_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGE_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGE_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct hclge_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
}
}
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+ struct errcode hclge_cmd_errcode[] = {
+ {HCLGE_CMD_EXEC_SUCCESS, 0},
+ {HCLGE_CMD_NO_AUTH, -EPERM},
+ {HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGE_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGE_CMD_NEXT_ERR, -ENOSR},
+ {HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGE_CMD_PARA_ERR, -EINVAL},
+ {HCLGE_CMD_RESULT_ERR, -ERANGE},
+ {HCLGE_CMD_TIMEOUT, -ETIME},
+ {HCLGE_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGE_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclge_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
int num, int ntc)
{
@@ -244,6 +259,44 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
return hclge_cmd_convert_err_code(desc_ret);
}
+static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num, int ntc)
+{
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /**
+ * If the command is sync, wait for the firmware to write back,
+ * if multi descriptors to be sent, use the first one to check
+ */
+ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclge_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
+ else
+ ret = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
+}
+
/**
* hclge_cmd_send - send command to command queue
* @hw: pointer to the hw struct
@@ -257,11 +310,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
struct hclge_cmq_ring *csq = &hw->cmq.csq;
- struct hclge_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int retval;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -285,49 +334,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclge_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
- /**
- * If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclge_cmd_csq_done(hw)) {
- complete = true;
- break;
- }
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (!complete)
- retval = -EBADE;
- else
- retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
- /* Clean the command send queue */
- handle = hclge_cmd_csq_clean(hw);
- if (handle < 0)
- retval = handle;
- else if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclge_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return retval;
+ return ret;
}
static void hclge_set_default_capability(struct hclge_dev *hdev)
@@ -363,6 +380,15 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps);
}
+static __le32 hclge_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
+ hnae3_set_bit(api_caps, HCLGE_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
static enum hclge_cmd_status
hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
{
@@ -373,6 +399,7 @@ hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_query_version_cmd *)desc.data;
+ resp->api_caps = hclge_build_api_caps();
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
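hclge_cmd_convert_err_code() above trades a long switch for a table walk with an -EIO fallback. Here is a self-contained userspace sketch of the same table-driven mapping; the hardware status codes in it are invented placeholders, and only the shape of the lookup mirrors the driver.

#include <stdio.h>
#include <errno.h>

/* Table-driven mapping from device status codes to errno values. */
struct errcode_map {
	unsigned int hw_code;	/* invented status codes, not firmware values */
	int err;
};

static const struct errcode_map demo_errcodes[] = {
	{ 0, 0 },
	{ 1, -EPERM },
	{ 2, -EOPNOTSUPP },
	{ 3, -EINVAL },
};

static int demo_convert_err_code(unsigned int hw_code)
{
	size_t i;

	for (i = 0; i < sizeof(demo_errcodes) / sizeof(demo_errcodes[0]); i++)
		if (demo_errcodes[i].hw_code == hw_code)
			return demo_errcodes[i].err;

	return -EIO;	/* unknown status: same fallback as the driver */
}

int main(void)
{
	printf("code 2 -> %d\n", demo_convert_err_code(2));
	printf("code 9 -> %d\n", demo_convert_err_code(9));	/* falls back to -EIO */
	return 0;
}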
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index edfadb5cb1c3..ff52a65b4cff 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -160,6 +160,7 @@ enum hclge_opcode_type {
HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_TM_NODES = 0x0816,
HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
HCLGE_OPC_QSET_DFX_STS = 0x0844,
HCLGE_OPC_PRI_DFX_STS = 0x0845,
@@ -385,11 +386,15 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
};
+enum HCLGE_API_CAP_BITS {
+ HCLGE_API_CAP_FLEX_RSS_TBL_B,
+};
+
#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
__le32 hardware;
- __le32 rsv;
+ __le32 api_caps;
__le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
@@ -1126,7 +1131,8 @@ struct hclge_dev_specs_0_cmd {
#define HCLGE_DEF_MAX_INT_GL 0x1FE0U
struct hclge_dev_specs_1_cmd {
- __le32 rsv0;
+ __le16 max_frm_size;
+ __le16 max_qset_num;
__le16 max_int_gl;
u8 rsv1[18];
};
@@ -1138,9 +1144,9 @@ static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
}
#define hclge_write_dev(a, reg, value) \
- hclge_write_reg((a)->io_base, (reg), (value))
+ hclge_write_reg((a)->io_base, reg, value)
#define hclge_read_dev(a, reg) \
- hclge_read_reg((a)->io_base, (reg))
+ hclge_read_reg((a)->io_base, reg)
static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
{
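The dev-specs layout change replaces a reserved __le32 with two explicit little-endian 16-bit fields (max_frm_size, max_qset_num) that the consumer must byte-swap. The snippet below is a hedged userspace illustration of decoding such a little-endian pair; the struct, helper and example values are invented, and in-kernel code would simply use le16_to_cpu() on __le16 members.

#include <stdio.h>
#include <stdint.h>

/* Invented 4-byte fragment of a dev-specs style response: two 16-bit
 * little-endian fields decoded with an explicit conversion helper. */
struct demo_specs {
	uint8_t raw[4];
};

static uint16_t demo_le16_to_cpu(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* 9728 = 0x2600 and 1024 = 0x0400, stored least significant byte first. */
	struct demo_specs specs = { .raw = { 0x00, 0x26, 0x00, 0x04 } };
	uint16_t max_frm_size = demo_le16_to_cpu(&specs.raw[0]);
	uint16_t max_qset_num = demo_le16_to_cpu(&specs.raw[2]);

	printf("max_frm_size=%u max_qset_num=%u\n", max_frm_size, max_qset_num);
	return 0;
}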
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index e08d11b8ecf1..5bf5db91d16c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -176,29 +176,6 @@ static int hclge_map_update(struct hclge_dev *hdev)
return hclge_rss_init_hw(hdev);
}
-static int hclge_client_setup_tc(struct hclge_dev *hdev)
-{
- struct hclge_vport *vport = hdev->vport;
- struct hnae3_client *client;
- struct hnae3_handle *handle;
- int ret;
- u32 i;
-
- for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
- handle = &vport[i].nic;
- client = handle->client;
-
- if (!client || !client->ops || !client->ops->setup_tc)
- continue;
-
- ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
int ret;
@@ -257,10 +234,6 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
if (ret)
goto err_out;
- ret = hclge_client_setup_tc(hdev);
- if (ret)
- goto err_out;
-
ret = hclge_notify_init_up(hdev);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 8f6dea5198cf..6b1d197df881 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -696,17 +696,16 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
u32 qset_mapping[HCLGE_BP_EXT_GRP_NUM];
struct hclge_qs_to_pri_link_cmd *map;
struct hclge_tqp_tx_queue_tc_cmd *tc;
+ u16 group_id, queue_id, qset_id;
enum hclge_opcode_type cmd;
+ u8 grp_num, pri_id, tc_id;
struct hclge_desc desc;
- int queue_id, group_id;
- int tc_id, qset_id;
- int pri_id, ret;
u16 qs_id_l;
u16 qs_id_h;
- u8 grp_num;
+ int ret;
u32 i;
- ret = kstrtouint(cmd_buf, 0, &queue_id);
+ ret = kstrtou16(cmd_buf, 0, &queue_id);
queue_id = (ret != 0) ? 0 : queue_id;
cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
@@ -754,7 +753,7 @@ static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
tc_id = tc->tc_id & 0x7;
dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
- dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
+ dev_info(&hdev->pdev->dev, "%04u | %04u | %02u | %02u\n",
queue_id, qset_id, pri_id, tc_id);
if (!hnae3_dev_dcb_supported(hdev)) {
@@ -800,6 +799,140 @@ err_tm_map_cmd_send:
cmd, ret);
}
+static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int pos = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump tm nodes, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+
+ pos += scnprintf(buf + pos, len - pos, " BASE_ID MAX_NUM\n");
+ pos += scnprintf(buf + pos, len - pos, "PG %4u %4u\n",
+ nodes->pg_base_id, nodes->pg_num);
+ pos += scnprintf(buf + pos, len - pos, "PRI %4u %4u\n",
+ nodes->pri_base_id, nodes->pri_num);
+ pos += scnprintf(buf + pos, len - pos, "QSET %4u %4u\n",
+ le16_to_cpu(nodes->qset_base_id),
+ le16_to_cpu(nodes->qset_num));
+ pos += scnprintf(buf + pos, len - pos, "QUEUE %4u %4u\n",
+ le16_to_cpu(nodes->queue_base_id),
+ le16_to_cpu(nodes->queue_num));
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_pri_shaper_para c_shaper_para;
+ struct hclge_pri_shaper_para p_shaper_para;
+ u8 pri_num, sch_mode, weight;
+ char *sch_mode_str;
+ int pos = 0;
+ int ret;
+ u8 i;
+
+ ret = hclge_tm_get_pri_num(hdev, &pri_num);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos,
+ "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B ");
+ pos += scnprintf(buf + pos, len - pos,
+ "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U ");
+ pos += scnprintf(buf + pos, len - pos,
+ "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n");
+
+ for (i = 0; i < pri_num; i++) {
+ ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_C_SHAPPING,
+ &c_shaper_para);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_pri_shaper(hdev, i,
+ HCLGE_OPC_TM_PRI_P_SHAPPING,
+ &p_shaper_para);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4s %3u %3u %3u %3u ",
+ i, sch_mode_str, weight, c_shaper_para.ir_b,
+ c_shaper_para.ir_u, c_shaper_para.ir_s);
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %1u %6u ",
+ c_shaper_para.bs_b, c_shaper_para.bs_s,
+ c_shaper_para.flag, c_shaper_para.rate);
+ pos += scnprintf(buf + pos, len - pos,
+ "%3u %3u %3u %3u %3u ",
+ p_shaper_para.ir_b, p_shaper_para.ir_u,
+ p_shaper_para.ir_s, p_shaper_para.bs_b,
+ p_shaper_para.bs_s);
+ pos += scnprintf(buf + pos, len - pos, "%1u %6u\n",
+ p_shaper_para.flag, p_shaper_para.rate);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_tm_qset(struct hclge_dev *hdev, char *buf, int len)
+{
+ u8 priority, link_vld, sch_mode, weight;
+ char *sch_mode_str;
+ int ret, pos;
+ u16 qset_num;
+ u16 i;
+
+ ret = hclge_tm_get_qset_num(hdev, &qset_num);
+ if (ret)
+ return ret;
+
+ pos = scnprintf(buf, len, "ID MAP_PRI LINK_VLD MODE DWRR\n");
+
+ for (i = 0; i < qset_num; i++) {
+ ret = hclge_tm_get_qset_map_pri(hdev, i, &priority, &link_vld);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_sch_mode(hdev, i, &sch_mode);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_get_qset_weight(hdev, i, &weight);
+ if (ret)
+ return ret;
+
+ sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" :
+ "sp";
+ pos += scnprintf(buf + pos, len - pos,
+ "%04u %4u %1u %4s %3u\n",
+ i, priority, link_vld, sch_mode_str, weight);
+ }
+
+ return 0;
+}
+
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
struct hclge_cfg_pause_param_cmd *pause_param;
@@ -851,39 +984,39 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
- struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
- struct hclge_rx_priv_wl_buf *rx_priv_wl;
- struct hclge_rx_com_wl *rx_packet_cnt;
- struct hclge_rx_com_thrd *rx_com_thrd;
- struct hclge_rx_com_wl *rx_com_wl;
- enum hclge_opcode_type cmd;
- struct hclge_desc desc[2];
+ struct hclge_desc desc;
int i, ret;
- cmd = HCLGE_OPC_TX_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
-
- tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
- cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_desc desc;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
- rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
le16_to_cpu(rx_buf_cmd->buf_num[i]));
@@ -891,43 +1024,61 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
le16_to_cpu(rx_buf_cmd->shared_buf));
- cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_com_wl;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev, "\n");
dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_com_wl->com_wl.high),
le16_to_cpu(rx_com_wl->com_wl.low));
- cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
- hclge_cmd_setup_basic_desc(desc, cmd, true);
- ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
- rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
dev_info(&hdev->pdev->dev,
"rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
le16_to_cpu(rx_packet_cnt->com_wl.high),
le16_to_cpu(rx_packet_cnt->com_wl.low));
- dev_info(&hdev->pdev->dev, "\n");
- if (!hnae3_dev_dcb_supported(hdev)) {
- dev_info(&hdev->pdev->dev,
- "Only DCB-supported dev supports rx priv wl\n");
- return;
- }
- cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
@@ -944,13 +1095,21 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
le16_to_cpu(rx_priv_wl->tc_wl[i].high),
le16_to_cpu(rx_priv_wl->tc_wl[i].low));
- cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
- hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ return 0;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
- hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
ret = hclge_cmd_send(&hdev->hw, desc, 2);
if (ret)
- goto err_qos_cmd_send;
+ return ret;
dev_info(&hdev->pdev->dev, "\n");
rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
@@ -967,6 +1126,52 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
i + HCLGE_TC_NUM_ONE_DESC,
le16_to_cpu(rx_com_thrd->com_thrd[i].high),
le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+ return 0;
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ enum hclge_opcode_type cmd;
+ int ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ ret = hclge_dbg_dump_tx_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
+
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
+ if (ret)
+ goto err_qos_cmd_send;
+
return;
err_qos_cmd_send:
@@ -1465,8 +1670,6 @@ static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
const char *cmd_buf)
{
-#define HCLGE_MAX_QSET_NUM 1024
-
u16 qsid;
int ret;
@@ -1476,9 +1679,9 @@ static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
return;
}
- if (qsid >= HCLGE_MAX_QSET_NUM) {
- dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
- qsid);
+ if (qsid >= hdev->ae_dev->dev_specs.max_qset_num) {
+ dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-%u]\n",
+ qsid, hdev->ae_dev->dev_specs.max_qset_num - 1);
return;
}
@@ -1591,3 +1794,22 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
return 0;
}
+
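+/* hclge_dbg_read_cmd - fill the debugfs read buffer with the TM information
+ * selected by cmd_buf (tm nodes, priority or qset configuration)
+ */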
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (strncmp(cmd_buf, HNAE3_DBG_TM_NODES,
+ strlen(HNAE3_DBG_TM_NODES)) == 0)
+ return hclge_dbg_dump_tm_nodes(hdev, buf, len);
+ else if (strncmp(cmd_buf, HNAE3_DBG_TM_PRI,
+ strlen(HNAE3_DBG_TM_PRI)) == 0)
+ return hclge_dbg_dump_tm_pri(hdev, buf, len);
+ else if (strncmp(cmd_buf, HNAE3_DBG_TM_QSET,
+ strlen(HNAE3_DBG_TM_QSET)) == 0)
+ return hclge_dbg_dump_tm_qset(hdev, buf, len);
+
+ return -EINVAL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 9ee55ee0487d..0ca7f1b984bf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1073,7 +1073,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
* This function queries the number of mpf and pf buffer descriptors.
*/
static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
- int *mpf_bd_num, int *pf_bd_num)
+ u32 *mpf_bd_num, u32 *pf_bd_num)
{
struct device *dev = &hdev->pdev->dev;
u32 mpf_min_bd_num, pf_min_bd_num;
@@ -1102,7 +1102,7 @@ static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
*mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
*pf_bd_num = le32_to_cpu(desc_bd.data[1]);
if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
- dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
+ dev_err(dev, "Invalid bd num: mpf(%u), pf(%u)\n",
*mpf_bd_num, *pf_bd_num);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index c242883fea5d..34b744df6709 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>
+#include <net/ipv6.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -24,7 +25,7 @@
#include "hnae3.h"
#define HCLGE_NAME "hclge"
-#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset)))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
#define HCLGE_BUF_SIZE_UNIT 256U
@@ -55,8 +56,6 @@
#define HCLGE_LINK_STATUS_MS 10
-#define HCLGE_VF_VPORT_START_NUM 1
-
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
@@ -628,7 +627,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -636,7 +635,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
struct hclge_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff = buff + ETH_GSTRING_LEN;
}
@@ -930,7 +929,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
return 0;
}
-static int hclge_parse_speed(int speed_cmd, int *speed)
+static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
switch (speed_cmd) {
case 6:
@@ -1373,6 +1372,8 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
+ ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1391,7 +1392,9 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+ ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1406,8 +1409,12 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
if (!dev_specs->max_tm_rate)
dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+ if (!dev_specs->max_qset_num)
+ dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@ -4237,11 +4244,6 @@ static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
return HCLGE_RSS_KEY_SIZE;
}
-static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGE_RSS_IND_TBL_SIZE;
-}
-
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
@@ -4283,6 +4285,7 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
{
struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
+ int rss_cfg_tbl_num;
u8 rss_msb_oft;
u8 rss_msb_val;
int ret;
@@ -4291,8 +4294,10 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u16 *indir)
u32 j;
req = (struct hclge_rss_indirection_table_cmd *)desc.data;
+ rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGE_RSS_CFG_TBL_SIZE;
- for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
@@ -4398,6 +4403,7 @@ static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
u8 *key, u8 *hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
int i;
@@ -4422,7 +4428,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
/* Get indirect table */
if (indir)
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = vport->rss_indirection_tbl[i];
return 0;
@@ -4431,6 +4437,7 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u8 hash_algo;
@@ -4462,7 +4469,7 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* Update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport->rss_indirection_tbl[i] = indir[i];
/* Update the hardware */
@@ -4494,22 +4501,12 @@ static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclge_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_init_rss_tuple_cmd(struct hclge_vport *vport,
+ struct ethtool_rxnfc *nfc,
+ struct hclge_rss_input_tuple_cmd *req)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_rss_input_tuple_cmd *req;
- struct hclge_desc desc;
u8 tuple_sets;
- int ret;
-
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclge_rss_input_tuple_cmd *)desc.data;
- hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
@@ -4554,6 +4551,32 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclge_init_rss_tuple_cmd(vport, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -4573,52 +4596,69 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclge_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclge_get_vport_rss_tuple(struct hclge_vport *vport, int flow_type,
+ u8 *tuple_sets)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
- u8 tuple_sets;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ *tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclge_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGE_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGE_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGE_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGE_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ ret = hclge_get_vport_rss_tuple(vport, nfc->flow_type, &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclge_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -4703,14 +4743,15 @@ void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
int i, j;
for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
vport[j].rss_indirection_tbl[i] =
i % vport[j].alloc_rss_size;
}
}
-static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+static int hclge_rss_init_cfg(struct hclge_dev *hdev)
{
+ u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
@@ -4718,6 +4759,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ u16 *rss_ind_tbl;
+
vport[i].rss_tuple_sets.ipv4_tcp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv4_udp_en =
@@ -4739,11 +4782,19 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_algo = rss_algo;
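+ /* the indirection table is devm-managed and is freed automatically
+ * when the device is released
+ */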
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ vport[i].rss_indirection_tbl = rss_ind_tbl;
memcpy(vport[i].rss_hash_key, hclge_hash_key,
HCLGE_RSS_KEY_SIZE);
}
hclge_rss_indir_init_cfg(hdev);
+
+ return 0;
}
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
@@ -5491,12 +5542,10 @@ static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
BIT(INNER_IP_TOS);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->psrc)
@@ -5521,12 +5570,10 @@ static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
/* check whether src/dst ip address used */
- if (!spec->ip6src[0] && !spec->ip6src[1] &&
- !spec->ip6src[2] && !spec->ip6src[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
*unused_tuple |= BIT(INNER_SRC_IP);
- if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
- !spec->ip6dst[2] && !spec->ip6dst[3])
+ if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
*unused_tuple |= BIT(INNER_DST_IP);
if (!spec->l4_proto)
@@ -5578,7 +5625,7 @@ static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
if (fs->m_ext.vlan_tci &&
be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
dev_err(&hdev->pdev->dev,
- "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
return -EINVAL;
}
@@ -8306,36 +8353,18 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev)
}
}
-void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
- enum HCLGE_MAC_ADDR_TYPE mac_type)
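+/* detach mac nodes that must be removed from hardware onto a temporary
+ * deletion list; the caller must hold the vport mac_list_lock
+ */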
+static void hclge_build_del_list(struct list_head *list,
+ bool is_del_list,
+ struct list_head *tmp_del_list)
{
- int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
struct hclge_mac_node *mac_cfg, *tmp;
- struct hclge_dev *hdev = vport->back;
- struct list_head tmp_del_list, *list;
- int ret;
-
- if (mac_type == HCLGE_MAC_ADDR_UC) {
- list = &vport->uc_mac_list;
- unsync = hclge_rm_uc_addr_common;
- } else {
- list = &vport->mc_mac_list;
- unsync = hclge_rm_mc_addr_common;
- }
-
- INIT_LIST_HEAD(&tmp_del_list);
-
- if (!is_del_list)
- set_bit(vport->vport_id, hdev->vport_config_block);
-
- spin_lock_bh(&vport->mac_list_lock);
list_for_each_entry_safe(mac_cfg, tmp, list, node) {
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
list_del(&mac_cfg->node);
- list_add_tail(&mac_cfg->node, &tmp_del_list);
+ list_add_tail(&mac_cfg->node, tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
if (is_del_list) {
@@ -8345,10 +8374,18 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
break;
}
}
+}
- spin_unlock_bh(&vport->mac_list_lock);
+static void hclge_unsync_del_list(struct hclge_vport *vport,
+ int (*unsync)(struct hclge_vport *vport,
+ const unsigned char *addr),
+ bool is_del_list,
+ struct list_head *tmp_del_list)
+{
+ struct hclge_mac_node *mac_cfg, *tmp;
+ int ret;
- list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
ret = unsync(vport, mac_cfg->mac_addr);
if (!ret || ret == -ENOENT) {
/* clear all mac addr from hardware, but remain these
@@ -8366,6 +8403,35 @@ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
mac_cfg->state = HCLGE_MAC_TO_DEL;
}
}
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_build_del_list(list, is_del_list, &tmp_del_list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
spin_lock_bh(&vport->mac_list_lock);
@@ -8772,32 +8838,16 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
}
-static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
- bool is_kill, u16 vlan,
- __be16 proto)
+static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ struct hclge_desc *desc)
{
- struct hclge_vport *vport = &hdev->vport[vfid];
struct hclge_vlan_filter_vf_cfg_cmd *req0;
struct hclge_vlan_filter_vf_cfg_cmd *req1;
- struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
int ret;
- /* if vf vlan table is full, firmware will close vf vlan filter, it
- * is unable and unnecessary to add new vlan id to vf vlan filter.
- * If spoof check is enable, and vf vlan is full, it shouldn't add
- * new vlan, because tx packets with these vlan id will be dropped.
- */
- if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
- if (vport->vf_info.spoofchk && vlan) {
- dev_err(&hdev->pdev->dev,
- "Can't add vlan due to spoof check is on and vf vlan table is full\n");
- return -EPERM;
- }
- return 0;
- }
-
hclge_cmd_setup_basic_desc(&desc[0],
HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
hclge_cmd_setup_basic_desc(&desc[1],
@@ -8827,12 +8877,22 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
return ret;
}
+ return 0;
+}
+
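+/* parse the response the firmware wrote back into desc[0] and update the
+ * vf vlan table full state when necessary
+ */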
+static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_vf_cfg_cmd *req;
+
+ req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+
if (!is_kill) {
#define HCLGE_VF_VLAN_NO_ENTRY 2
- if (!req0->resp_code || req0->resp_code == 1)
+ if (!req->resp_code || req->resp_code == 1)
return 0;
- if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
set_bit(vfid, hdev->vf_vlan_full);
dev_warn(&hdev->pdev->dev,
"vf vlan table is full, vf vlan filter is disabled\n");
@@ -8841,10 +8901,10 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
dev_err(&hdev->pdev->dev,
"Add vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
} else {
#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
- if (!req0->resp_code)
+ if (!req->resp_code)
return 0;
/* vf vlan filter is disabled when vf vlan table is full,
@@ -8852,17 +8912,46 @@ static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
* Just return 0 without warning, avoid massive verbose
* print logs when unload.
*/
- if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
return 0;
dev_err(&hdev->pdev->dev,
"Kill vf vlan filter fail, ret =%u.\n",
- req0->resp_code);
+ req->resp_code);
}
return -EIO;
}
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ __be16 proto)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* if the vf vlan table is full, the firmware will disable the vf vlan
+ * filter, so it is pointless and unnecessary to add a new vlan id to
+ * the vf vlan filter. If spoof check is enabled and the vf vlan table
+ * is full, a new vlan must not be added either, because tx packets
+ * with these vlan ids would be dropped.
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
+ if (ret)
+ return ret;
+
+ return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
+}
+
static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
u16 vlan_id, bool is_kill)
{
@@ -9664,7 +9753,7 @@ int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
/* HW supports 2 layer vlan */
max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
+ max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
return -EINVAL;
max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
@@ -9813,12 +9902,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+ queue_id);
+ return;
+ }
+
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
@@ -10581,7 +10677,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
- hclge_rss_init_cfg(hdev);
+ ret = hclge_rss_init_cfg(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
ret = hclge_rss_init_hw(hdev);
if (ret) {
dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
@@ -10809,7 +10910,7 @@ static void hclge_reset_vf_rate(struct hclge_dev *hdev)
}
}
-static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
int min_tx_rate, int max_tx_rate)
{
if (min_tx_rate != 0 ||
@@ -10830,7 +10931,7 @@ static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
struct hclge_dev *hdev = vport->back;
int ret;
- ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
if (ret)
return ret;
@@ -11072,6 +11173,7 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
bool rxfh_configured)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hclge_vport *vport = hclge_get_vport(handle);
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
@@ -11115,11 +11217,12 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
+ GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
ret = hclge_set_rss(handle, rss_indir, NULL, 0);
@@ -11799,7 +11902,6 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_get_rss_key_size,
- .get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
.set_rss_tuple = hclge_set_rss_tuple,
@@ -11850,6 +11952,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.enable_fd = hclge_enable_fd,
.add_arfs_entry = hclge_add_fd_entry_by_arfs,
.dbg_run_cmd = hclge_dbg_run_cmd,
+ .dbg_read_cmd = hclge_dbg_read_cmd,
.handle_hw_ras_error = hclge_handle_hw_ras_error,
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ca46bc9110d7..19d7f28773f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -17,6 +17,8 @@
#define HCLGE_MAX_PF_NUM 8
+#define HCLGE_VF_VPORT_START_NUM 1
+
#define HCLGE_RD_FIRST_STATS_NUM 2
#define HCLGE_RD_OTHER_STATS_NUM 4
@@ -44,15 +46,12 @@
#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
-#define HCLGE_CMDQ_INTR_SRC_REG 0x27100
#define HCLGE_CMDQ_INTR_STS_REG 0x27104
#define HCLGE_CMDQ_INTR_EN_REG 0x27108
#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
/* bar registers for common func */
#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
-#define HCLGE_RAS_OTHER_STS_REG 0x20B00
-#define HCLGE_FUNC_RESET_STS_REG 0x20C00
#define HCLGE_GRO_EN_REG 0x28000
/* bar registers for rcb */
@@ -97,8 +96,6 @@
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
-#define HCLGE_RSS_CFG_TBL_NUM \
- (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
@@ -148,6 +145,8 @@
/* Factor used to calculate offset and bitmap of VF num */
#define HCLGE_VF_NUM_PER_CMD 64
+#define HCLGE_MAX_QSET_NUM 1024
+
enum HLCGE_PORT_TYPE {
HOST_PORT,
NETWORK_PORT
@@ -726,7 +725,7 @@ struct hclge_vf_vlan_cfg {
* x = (~k) & v
* y = (k ^ ~v) & k
*/
-#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_x(x, k, v) (x = ~(k) & (v))
#define calc_y(y, k, v) \
do { \
const typeof(k) _k_ = (k); \
@@ -922,7 +921,7 @@ struct hclge_vport {
u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
/* User configured lookup table entries */
- u16 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+ u16 *rss_indirection_tbl;
int rss_algo; /* User configured hash algorithm */
/* User configured rss tuple sets */
struct hclge_rss_tuple_cfg rss_tuple_sets;
@@ -1006,6 +1005,8 @@ int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
+int hclge_dbg_read_cmd(struct hnae3_handle *handle, const char *cmd_buf,
+ char *buf, int len);
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 754c09ada901..51a36e74f088 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -56,7 +56,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
resp_pf_to_vf->msg.resp_status = resp;
} else {
dev_warn(&hdev->pdev->dev,
- "failed to send response to VF, response status %d is out-of-bound\n",
+ "failed to send response to VF, response status %u is out-of-bound\n",
resp);
resp_pf_to_vf->msg.resp_status = EIO;
}
@@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
int ring_num;
- int i = 0;
+ int i;
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
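+ /* validate every tqp index from the mailbox request before using it */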
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1);
+ return -EINVAL;
+ }
+ }
+
hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
- req->msg.param[i].ring_type);
+ req->msg.param[0].ring_type);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg.param[i].tqp_index]);
+ [req->msg.param[0].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
cur_chain = ring_chain;
@@ -597,6 +607,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
index = mbx_req->msg.data[0];
+ /* Check the query index of rss_hash_key from the VF and make sure it
+ * does not exceed the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(vport[0].rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to get the rss hash key, the index(%u) invalid !\n",
+ index);
+ return;
+ }
+
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 82742a64f3b7..151afd1f0688 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -41,8 +41,9 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
struct hclge_shaper_ir_para *ir_para,
u32 max_tm_rate)
{
+#define DEFAULT_SHAPER_IR_B 126
#define DIVISOR_CLK (1000 * 8)
-#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+#define DEFAULT_DIVISOR_IR_B (DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
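+/* with DIVISOR_CLK = 1000 * 8, DEFAULT_DIVISOR_IR_B evaluates to 126 * 8000 = 1008000 */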
static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
6 * 256, /* Priority level */
@@ -69,10 +70,10 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
* ir_calc = ---------------- * 1000
* tick * 1
*/
- ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
+ ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
- ir_para->ir_b = 126;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
ir_para->ir_u = 0;
ir_para->ir_s = 0;
@@ -81,7 +82,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Increasing the denominator to select ir_s value */
while (ir_calc >= ir && ir) {
ir_s_calc++;
- ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
+ ir_calc = DEFAULT_DIVISOR_IR_B /
+ (tick * (1 << ir_s_calc));
}
ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
@@ -92,12 +94,12 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
while (ir_calc < ir) {
ir_u_calc++;
- numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
+ numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
ir_calc = (numerator + (tick >> 1)) / tick;
}
if (ir_calc == ir) {
- ir_para->ir_b = 126;
+ ir_para->ir_b = DEFAULT_SHAPER_IR_B;
} else {
u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
ir_para->ir_b = (ir * tick + (denominator >> 1)) /
@@ -640,13 +642,18 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
/* TC configuration is shared by PF/VF in one port, only allow
* one tc for VF for simplicity. VF's vport_id is non zero.
*/
- kinfo->tc_info.num_tc = vport->vport_id ? 1 :
+ if (vport->vport_id) {
+ kinfo->tc_info.num_tc = 1;
+ vport->qs_offset = HNAE3_MAX_TC +
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM;
+ vport_max_rss_size = hdev->vf_rss_size_max;
+ } else {
+ kinfo->tc_info.num_tc =
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
- vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
- (vport->vport_id ? (vport->vport_id - 1) : 0);
+ vport->qs_offset = 0;
+ vport_max_rss_size = hdev->pf_rss_size_max;
+ }
- vport_max_rss_size = vport->vport_id ? hdev->vf_rss_size_max :
- hdev->pf_rss_size_max;
max_rss_size = min_t(u16, vport_max_rss_size,
hclge_vport_get_max_rss_size(vport));
@@ -1616,3 +1623,189 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
return hclge_tm_bp_setup(hdev);
}
+
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ /* Each PF has 8 qsets and each VF has 1 qset */
+ *qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *qset_num = le16_to_cpu(nodes->qset_num);
+ return 0;
+}
+
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
+{
+ struct hclge_tm_nodes_cmd *nodes;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
+ *pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get pri num, ret = %d\n", ret);
+ return ret;
+ }
+
+ nodes = (struct hclge_tm_nodes_cmd *)desc.data;
+ *pri_num = nodes->pri_num;
+ return 0;
+}
+
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld)
+{
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset map priority, ret = %d\n", ret);
+ return ret;
+ }
+
+ *priority = map->priority;
+ *link_vld = map->link_vld;
+ return 0;
+}
+
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
+{
+ struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
+ qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
+ qs_sch_mode->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = qs_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
+{
+ struct hclge_qs_weight_cmd *qs_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ qs_weight->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get qset weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = qs_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
+{
+ struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
+ pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
+ pri_sch_mode->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority sch mode, ret = %d\n", ret);
+ return ret;
+ }
+
+ *mode = pri_sch_mode->sch_mode;
+ return 0;
+}
+
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ priority_weight->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority weight, ret = %d\n", ret);
+ return ret;
+ }
+
+ *weight = priority_weight->dwrr;
+ return 0;
+}
+
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_pri_shaper_para *para)
+{
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
+ cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
+ return -EINVAL;
+
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ shap_cfg_cmd->pri_id = pri_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get priority shaper(%#x), ret = %d\n",
+ cmd, ret);
+ return ret;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
+ para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
+ para->flag = shap_cfg_cmd->flag;
+ para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 5498d73ed34b..b25d76023af0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -17,10 +17,13 @@
/* SP or DWRR */
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
-#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
+#define HCLGE_TM_TX_SCHD_SP_MSK 0xFE
#define HCLGE_ETHER_MAX_RATE 100000
+#define HCLGE_TM_PF_MAX_PRI_NUM 8
+#define HCLGE_TM_PF_MAX_QSET_NUM 8
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -65,6 +68,18 @@ struct hclge_priority_weight_cmd {
u8 dwrr;
};
+struct hclge_pri_sch_mode_cfg_cmd {
+ u8 pri_id;
+ u8 rsvd[3];
+ u8 sch_mode;
+};
+
+struct hclge_qs_sch_mode_cfg_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ u8 sch_mode;
+};
+
struct hclge_qs_weight_cmd {
__le16 qs_id;
u8 dwrr;
@@ -173,13 +188,34 @@ struct hclge_shaper_ir_para {
u8 ir_s; /* IR_S parameter of IR shaper */
};
+struct hclge_tm_nodes_cmd {
+ u8 pg_base_id;
+ u8 pri_base_id;
+ __le16 qset_base_id;
+ __le16 queue_base_id;
+ u8 pg_num;
+ u8 pri_num;
+ __le16 qset_num;
+ __le16 queue_num;
+};
+
+struct hclge_pri_shaper_para {
+ u8 ir_b;
+ u8 ir_u;
+ u8 ir_s;
+ u8 bs_b;
+ u8 bs_s;
+ u8 flag;
+ u32 rate;
+};
+
#define hclge_tm_set_field(dest, string, val) \
hnae3_set_field((dest), \
(HCLGE_TM_SHAP_##string##_MSK), \
(HCLGE_TM_SHAP_##string##_LSH), val)
#define hclge_tm_get_field(src, string) \
- hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
- (HCLGE_TM_SHAP_##string##_LSH))
+ hnae3_get_field((src), HCLGE_TM_SHAP_##string##_MSK, \
+ HCLGE_TM_SHAP_##string##_LSH)
int hclge_tm_schd_init(struct hclge_dev *hdev);
int hclge_tm_vport_map_update(struct hclge_dev *hdev);
@@ -195,5 +231,15 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
-
+int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num);
+int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num);
+int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
+ u8 *link_vld);
+int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode);
+int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight);
+int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode);
+int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight);
+int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
+ enum hclge_opcode_type cmd,
+ struct hclge_pri_shaper_para *para);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index e04c0cfeb95c..46700c427849 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -176,36 +176,111 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}
+struct vf_errcode {
+ u32 imp_errcode;
+ int common_errno;
+};
+
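+/* copy the descriptors to be sent into the CSQ ring, advancing and
+ * wrapping next_to_use as needed; the caller holds the CSQ lock
+ */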
+static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_desc *desc_to_use;
+ int handle = 0;
+
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+}
+
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
- switch (desc_ret) {
- case HCLGEVF_CMD_EXEC_SUCCESS:
- return 0;
- case HCLGEVF_CMD_NO_AUTH:
- return -EPERM;
- case HCLGEVF_CMD_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case HCLGEVF_CMD_QUEUE_FULL:
- return -EXFULL;
- case HCLGEVF_CMD_NEXT_ERR:
- return -ENOSR;
- case HCLGEVF_CMD_UNEXE_ERR:
- return -ENOTBLK;
- case HCLGEVF_CMD_PARA_ERR:
- return -EINVAL;
- case HCLGEVF_CMD_RESULT_ERR:
- return -ERANGE;
- case HCLGEVF_CMD_TIMEOUT:
- return -ETIME;
- case HCLGEVF_CMD_HILINK_ERR:
- return -ENOLINK;
- case HCLGEVF_CMD_QUEUE_ILLEGAL:
- return -ENXIO;
- case HCLGEVF_CMD_INVALID:
- return -EBADR;
- default:
- return -EIO;
+ struct vf_errcode hclgevf_cmd_errcode[] = {
+ {HCLGEVF_CMD_EXEC_SUCCESS, 0},
+ {HCLGEVF_CMD_NO_AUTH, -EPERM},
+ {HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+ {HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
+ {HCLGEVF_CMD_NEXT_ERR, -ENOSR},
+ {HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
+ {HCLGEVF_CMD_PARA_ERR, -EINVAL},
+ {HCLGEVF_CMD_RESULT_ERR, -ERANGE},
+ {HCLGEVF_CMD_TIMEOUT, -ETIME},
+ {HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
+ {HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
+ {HCLGEVF_CMD_INVALID, -EBADR},
+ };
+ u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
+ u32 i;
+
+ for (i = 0; i < errcode_count; i++)
+ if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
+ return hclgevf_cmd_errcode[i].common_errno;
+
+ return -EIO;
+}
+
+static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ /* Get the result of hardware write back */
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
}
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+ hw->cmq.last_status = desc_ret;
+
+ return hclgevf_cmd_convert_err_code(desc_ret);
+}
+
+static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
+ struct hclgevf_desc *desc, int num, int ntc)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ bool is_completed = false;
+ u32 timeout = 0;
+ int handle, ret;
+
+ /* If the command is sync, wait for the firmware to write back;
+ * if multiple descriptors are to be sent, use the first one to check.
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw)) {
+ is_completed = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!is_completed)
+ ret = -EBADE;
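+/* translate the driver's internal tuple bits into the ethtool RXH_* flags */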
+ else
+ ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle < 0)
+ ret = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+ return ret;
}
/* hclgevf_cmd_send - send command to command queue
@@ -220,13 +295,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- struct hclgevf_desc *desc_to_use;
- bool complete = false;
- u32 timeout = 0;
- int handle = 0;
- int status = 0;
- u16 retval;
- u16 opcode;
+ int ret;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -250,67 +319,18 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
* which will be use for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- opcode = le16_to_cpu(desc[0].opcode);
- while (handle < num) {
- desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
- *desc_to_use = desc[handle];
- (hw->cmq.csq.next_to_use)++;
- if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
- hw->cmq.csq.next_to_use = 0;
- handle++;
- }
+
+ hclgevf_cmd_copy_desc(hw, desc, num);
/* Write to hardware */
hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
hw->cmq.csq.next_to_use);
- /* If the command is sync, wait for the firmware to write back,
- * if multi descriptors to be sent, use the first one to check
- */
- if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
- do {
- if (hclgevf_cmd_csq_done(hw))
- break;
- udelay(1);
- timeout++;
- } while (timeout < hw->cmq.tx_timeout);
- }
-
- if (hclgevf_cmd_csq_done(hw)) {
- complete = true;
- handle = 0;
-
- while (handle < num) {
- /* Get the result of hardware write back */
- desc_to_use = &hw->cmq.csq.desc[ntc];
- desc[handle] = *desc_to_use;
-
- if (likely(!hclgevf_is_special_opcode(opcode)))
- retval = le16_to_cpu(desc[handle].retval);
- else
- retval = le16_to_cpu(desc[0].retval);
-
- status = hclgevf_cmd_convert_err_code(retval);
- hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
- ntc++;
- handle++;
- if (ntc == hw->cmq.csq.desc_num)
- ntc = 0;
- }
- }
-
- if (!complete)
- status = -EBADE;
-
- /* Clean the command send queue */
- handle = hclgevf_cmd_csq_clean(hw);
- if (handle != num)
- dev_warn(&hdev->pdev->dev,
- "cleaned %d, need to clean %d\n", handle, num);
+ ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
spin_unlock_bh(&hw->cmq.csq.lock);
- return status;
+ return ret;
}
static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
@@ -342,6 +362,15 @@ static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}
+static __le32 hclgevf_build_api_caps(void)
+{
+ u32 api_caps = 0;
+
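+ /* advertise the FLEX_RSS_TBL api capability when querying the firmware version */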
+ hnae3_set_bit(api_caps, HCLGEVF_API_CAP_FLEX_RSS_TBL_B, 1);
+
+ return cpu_to_le32(api_caps);
+}
+
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
@@ -352,6 +381,7 @@ static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
resp = (struct hclgevf_query_version_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
+ resp->api_caps = hclgevf_build_api_caps();
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
return status;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 82eed258e8c1..8a37a22a176b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -161,11 +161,15 @@ enum HCLGEVF_CAP_BITS {
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B,
};
+enum HCLGEVF_API_CAP_BITS {
+ HCLGEVF_API_CAP_FLEX_RSS_TBL_B,
+};
+
#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
__le32 hardware;
- __le32 rsv;
+ __le32 api_caps;
__le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
@@ -212,8 +216,8 @@ struct hclgevf_rss_input_tuple_cmd {
#define HCLGEVF_RSS_CFG_TBL_SIZE 16
struct hclgevf_rss_indirection_table_cmd {
- u16 start_table_index;
- u16 rss_set_bitmap;
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
};
@@ -225,7 +229,7 @@ struct hclgevf_rss_indirection_table_cmd {
#define HCLGEVF_RSS_TC_VALID_B 15
#define HCLGEVF_MAX_TC_NUM 8
struct hclgevf_rss_tc_mode_cmd {
- u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ __le16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
u8 rsv[8];
};
@@ -274,7 +278,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
-#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
@@ -292,7 +295,8 @@ struct hclgevf_dev_specs_0_cmd {
#define HCLGEVF_DEF_MAX_INT_GL 0x1FE0U
struct hclgevf_dev_specs_1_cmd {
- __le32 rsv0;
+ __le16 max_frm_size;
+ __le16 rsv0;
__le16 max_int_gl;
u8 rsv1[18];
};
@@ -310,9 +314,9 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
}
#define hclgevf_write_dev(a, reg, value) \
- hclgevf_write_reg((a)->io_base, (reg), (value))
+ hclgevf_write_reg((a)->io_base, reg, value)
#define hclgevf_read_dev(a, reg) \
- hclgevf_read_reg((a)->io_base, (reg))
+ hclgevf_read_reg((a)->io_base, reg)
#define HCLGEVF_SEND_SYNC(flag) \
((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 674b3a22e91f..700e068764c8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -180,7 +180,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "txq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -188,7 +188,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
struct hclgevf_tqp, q);
- snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%u_pktnum_rcd",
tqp->index);
buff += ETH_GSTRING_LEN;
}
@@ -642,26 +642,25 @@ static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
return HCLGEVF_RSS_KEY_SIZE;
}
-static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
-{
- return HCLGEVF_RSS_IND_TBL_SIZE;
-}
-
static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
{
const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
struct hclgevf_rss_indirection_table_cmd *req;
struct hclgevf_desc desc;
+ int rss_cfg_tbl_num;
int status;
int i, j;
req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+ rss_cfg_tbl_num = hdev->ae_dev->dev_specs.rss_ind_tbl_size /
+ HCLGEVF_RSS_CFG_TBL_SIZE;
- for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ for (i = 0; i < rss_cfg_tbl_num; i++) {
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
false);
- req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
- req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ req->start_table_index =
+ cpu_to_le16(i * HCLGEVF_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap = cpu_to_le16(HCLGEVF_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
req->rss_result[j] =
indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
@@ -702,12 +701,16 @@ static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
- hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGEVF_RSS_TC_VALID_B,
(tc_valid[i] & 0x1));
- hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ hnae3_set_field(mode, HCLGEVF_RSS_TC_SIZE_M,
HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
- hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ hnae3_set_field(mode, HCLGEVF_RSS_TC_OFFSET_M,
HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
}
status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (status)
@@ -795,7 +798,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
}
if (indir)
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
return 0;
@@ -838,7 +841,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
}
/* update the shadow RSS table with user specified qids */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = indir[i];
/* update the hardware */
@@ -870,25 +873,13 @@ static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
return hash_sets;
}
-static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc,
+ struct hclgevf_rss_input_tuple_cmd *req)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- struct hclgevf_rss_input_tuple_cmd *req;
- struct hclgevf_desc desc;
u8 tuple_sets;
- int ret;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- if (nfc->data &
- ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
- hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
@@ -933,6 +924,35 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return -EINVAL;
}
+ return 0;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init rss tuple cmd, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -951,56 +971,73 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
return 0;
}
-static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
- struct ethtool_rxnfc *nfc)
+static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
+ int flow_type, u8 *tuple_sets)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
- u8 tuple_sets;
-
- if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
- return -EOPNOTSUPP;
-
- nfc->data = 0;
-
- switch (nfc->flow_type) {
+ switch (flow_type) {
case TCP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
break;
case UDP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
break;
case TCP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
break;
case UDP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
break;
case SCTP_V4_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
break;
case SCTP_V6_FLOW:
- tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ *tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
break;
case IPV4_FLOW:
case IPV6_FLOW:
- tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ *tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
break;
default:
return -EINVAL;
}
- if (!tuple_sets)
- return 0;
+ return 0;
+}
+
+static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
+{
+ u64 tuple_data = 0;
if (tuple_sets & HCLGEVF_D_PORT_BIT)
- nfc->data |= RXH_L4_B_2_3;
+ tuple_data |= RXH_L4_B_2_3;
if (tuple_sets & HCLGEVF_S_PORT_BIT)
- nfc->data |= RXH_L4_B_0_1;
+ tuple_data |= RXH_L4_B_0_1;
if (tuple_sets & HCLGEVF_D_IP_BIT)
- nfc->data |= RXH_IP_DST;
+ tuple_data |= RXH_IP_DST;
if (tuple_sets & HCLGEVF_S_IP_BIT)
- nfc->data |= RXH_IP_SRC;
+ tuple_data |= RXH_IP_SRC;
+
+ return tuple_data;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 tuple_sets;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
+ &tuple_sets);
+ if (ret || !tuple_sets)
+ return ret;
+
+ nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
return 0;
}
@@ -2482,8 +2519,9 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
return ret;
}
-static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
+static int hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
{
+ u16 rss_ind_tbl_size = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
struct hclgevf_rss_tuple_cfg *tuple_sets;
u32 i;
@@ -2492,7 +2530,16 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ u8 *rss_ind_tbl;
+
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+
+ rss_ind_tbl = devm_kcalloc(&hdev->pdev->dev, rss_ind_tbl_size,
+ sizeof(*rss_ind_tbl), GFP_KERNEL);
+ if (!rss_ind_tbl)
+ return -ENOMEM;
+
+ rss_cfg->rss_indirection_tbl = rss_ind_tbl;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
@@ -2510,8 +2557,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
}
/* Initialize RSS indirect table */
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+
+ return 0;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
@@ -3048,6 +3097,7 @@ static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
ae_dev->dev_specs.max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ ae_dev->dev_specs.max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
@@ -3066,6 +3116,7 @@ static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
+ ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
}
static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
@@ -3080,6 +3131,8 @@ static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
if (!dev_specs->max_int_gl)
dev_specs->max_int_gl = HCLGEVF_DEF_MAX_INT_GL;
+ if (!dev_specs->max_frm_size)
+ dev_specs->max_frm_size = HCLGEVF_MAC_MAX_FRAME;
}
static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
@@ -3266,7 +3319,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
/* Initialize RSS for this VF */
- hclgevf_rss_init_cfg(hdev);
+ ret = hclgevf_rss_init_cfg(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
+ goto err_config;
+ }
+
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -3444,11 +3502,12 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
goto out;
/* Reinitializes the rss indirect table according to the new RSS size */
- rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ rss_indir = kcalloc(hdev->ae_dev->dev_specs.rss_ind_tbl_size,
+ sizeof(u32), GFP_KERNEL);
if (!rss_indir)
return -ENOMEM;
- for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ for (i = 0; i < hdev->ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_indir[i] = i % kinfo->rss_size;
hdev->rss_cfg.rss_size = kinfo->rss_size;
@@ -3687,7 +3746,6 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_strings = hclgevf_get_strings,
.get_sset_count = hclgevf_get_sset_count,
.get_rss_key_size = hclgevf_get_rss_key_size,
- .get_rss_indir_size = hclgevf_get_rss_indir_size,
.get_rss = hclgevf_get_rss,
.set_rss = hclgevf_set_rss,
.get_rss_tuple = hclgevf_get_rss_tuple,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index f6d817a3edcb..8c27ecd819af 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -113,8 +113,7 @@
#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
-#define HCLGEVF_RSS_CFG_TBL_NUM \
- (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+
#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
#define HCLGEVF_D_PORT_BIT BIT(0)
@@ -125,6 +124,8 @@
#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
(HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
+#define HCLGEVF_MAC_MAX_FRAME 9728
+
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
enum hclgevf_evt_cause {
@@ -217,7 +218,8 @@ struct hclgevf_rss_cfg {
u32 hash_algo;
u32 rss_size;
u8 hw_tc_map;
- u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+ /* shadow table */
+ u8 *rss_indirection_tbl;
struct hclgevf_rss_tuple_cfg rss_tuple_sets;
};
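
The hclgevf hunks above drop the fixed HCLGEVF_RSS_IND_TBL_SIZE shadow table in favour of one sized from dev_specs.rss_ind_tbl_size and filled round-robin (rss_indirection_tbl[i] = i % rss_size). A minimal standalone C sketch of that default fill pattern; the table length and queue count below are made-up example values, not taken from the driver:

/* Illustrative only: mirrors the round-robin default fill used by
 * hclgevf_rss_init_cfg() above. Sizes are arbitrary example values.
 */
#include <stdio.h>

int main(void)
{
        unsigned char tbl[512];         /* example rss_ind_tbl_size from dev specs */
        unsigned int rss_size = 8;      /* example number of RX queues */
        unsigned int i;

        for (i = 0; i < sizeof(tbl); i++)
                tbl[i] = i % rss_size;  /* spread entries evenly across queues */

        /* every queue ends up referenced sizeof(tbl) / rss_size = 64 times */
        printf("tbl[0]=%d tbl[7]=%d tbl[8]=%d\n", tbl[0], tbl[7], tbl[8]);
        return 0;
}
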
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index a0bfb509e002..c612ef526d16 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -20,7 +20,7 @@
* 1.02 RMK 25/05/1997 Added code to restart RU if it goes not ready
* 1.03 RMK 14/09/1997 Cleaned up the handling of a reset during the TX interrupt.
* Should prevent lockup.
- * 1.04 RMK 17/09/1997 Added more info when initialsation of chip goes wrong.
+ * 1.04 RMK 17/09/1997 Added more info when initialisation of chip goes wrong.
* TDR now only reports failure when chip reports non-zero
* TDR time-distance.
* 1.05 RMK 31/12/1997 Removed calls to dev_tint for 2.1
@@ -117,7 +117,7 @@ ether1_outw_p (struct net_device *dev, unsigned short val, int addr, int svflgs)
* Some inline assembler to allow fast transfers on to/off of the card.
* Since this driver depends on some features presented by the ARM
* specific architecture, and that you can't configure this driver
- * without specifiing ARM mode, this is not a problem.
+ * without specifying ARM mode, this is not a problem.
*
* This routine is essentially an optimised memcpy from the card's
* onboard RAM to kernel memory.
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index f79034c786c8..1c0e4beb56e7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -115,7 +115,7 @@ struct ibmvnic_stat {
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
offsetof(struct ibmvnic_statistics, stat))
-#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
+#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
static const struct ibmvnic_stat ibmvnic_stats[] = {
{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
@@ -247,46 +247,23 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
if (!ltb->buff)
return;
+ /* VIOS automatically unmaps the long term buffer at remote
+ * end for the following resets:
+ * FAILOVER, MOBILITY, TIMEOUT.
+ */
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
- adapter->reset_reason != VNIC_RESET_MOBILITY)
+ adapter->reset_reason != VNIC_RESET_MOBILITY &&
+ adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
- struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
{
- struct device *dev = &adapter->vdev->dev;
- int rc;
+ if (!ltb->buff)
+ return -EINVAL;
memset(ltb->buff, 0, ltb->size);
-
- mutex_lock(&adapter->fw_lock);
- adapter->fw_done_rc = 0;
-
- reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc) {
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
- if (rc) {
- dev_info(dev,
- "Reset failed, long term map request timed out or aborted\n");
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- if (adapter->fw_done_rc) {
- dev_info(dev,
- "Reset failed, attempting to free and reallocate buffer\n");
- free_long_term_buff(adapter, ltb);
- mutex_unlock(&adapter->fw_lock);
- return alloc_long_term_buff(adapter, ltb, ltb->size);
- }
- mutex_unlock(&adapter->fw_lock);
return 0;
}
@@ -508,8 +485,7 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter)
rx_pool->size *
rx_pool->buff_size);
} else {
- rc = reset_long_term_buff(adapter,
- &rx_pool->long_term_buff);
+ rc = reset_long_term_buff(&rx_pool->long_term_buff);
}
if (rc)
@@ -632,12 +608,11 @@ static int init_rx_pools(struct net_device *netdev)
return 0;
}
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
{
int rc, i;
- rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
+ rc = reset_long_term_buff(&tx_pool->long_term_buff);
if (rc)
return rc;
@@ -664,10 +639,10 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
- rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
+ rc = reset_one_tx_pool(&adapter->tso_pool[i]);
if (rc)
return rc;
- rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
+ rc = reset_one_tx_pool(&adapter->tx_pool[i]);
if (rc)
return rc;
}
@@ -1221,8 +1196,7 @@ static int ibmvnic_open(struct net_device *netdev)
rc = __ibmvnic_open(netdev);
out:
- /*
- * If open fails due to a pending failover, set device state and
+ /* If open fails due to a pending failover, set device state and
* return. Device operation will be handled by reset routine.
*/
if (rc && adapter->failover_pending) {
@@ -1353,10 +1327,8 @@ static int __ibmvnic_close(struct net_device *netdev)
adapter->state = VNIC_CLOSING;
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- if (rc)
- return rc;
adapter->state = VNIC_CLOSED;
- return 0;
+ return rc;
}
static int ibmvnic_close(struct net_device *netdev)
@@ -1384,10 +1356,10 @@ static int ibmvnic_close(struct net_device *netdev)
/**
* build_hdr_data - creates L2/L3/L4 header data buffer
- * @hdr_field - bitfield determining needed headers
- * @skb - socket buffer
- * @hdr_len - array of header lengths
- * @tot_len - total length of data
+ * @hdr_field: bitfield determining needed headers
+ * @skb: socket buffer
+ * @hdr_len: array of header lengths
+ * @hdr_data: buffer to write the header to
*
* Reads hdr_field to determine which headers are needed by firmware.
* Builds a buffer containing these headers. Saves individual header
@@ -1444,11 +1416,11 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
/**
* create_hdr_descs - create header and header extension descriptors
- * @hdr_field - bitfield determining needed headers
- * @data - buffer containing header data
- * @len - length of data buffer
- * @hdr_len - array of individual header lengths
- * @scrq_arr - descriptor array
+ * @hdr_field: bitfield determining needed headers
+ * @hdr_data: buffer containing header data
+ * @len: length of data buffer
+ * @hdr_len: array of individual header lengths
+ * @scrq_arr: descriptor array
*
* Creates header and, if needed, header extension descriptors and
* places them in a descriptor array, scrq_arr
@@ -1496,10 +1468,9 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @skb - socket buffer
- * @num_entries - number of descriptors to be sent
- * @subcrq - first TX descriptor
- * @hdr_field - bit field determining which headers will be sent
+ * @txbuff: tx buffer
+ * @num_entries: number of descriptors to be sent
+ * @hdr_field: bit field determining which headers will be sent
*
* This function will build a TX descriptor array with applicable
* L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
@@ -1702,6 +1673,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
skb_copy_from_linear_data(skb, dst, skb->len);
}
+ /* post changes to long_term_buff *dst before VIOS accessing it */
+ dma_wmb();
+
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
@@ -1925,93 +1899,7 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
return rc;
}
-/**
- * do_change_param_reset returns zero if we are able to keep processing reset
- * events, or non-zero if we hit a fatal error and must halt.
- */
-static int do_change_param_reset(struct ibmvnic_adapter *adapter,
- struct ibmvnic_rwi *rwi,
- u32 reset_state)
-{
- struct net_device *netdev = adapter->netdev;
- int i, rc;
-
- netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
- rwi->reset_reason);
-
- netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
-
- ibmvnic_cleanup(netdev);
-
- if (reset_state == VNIC_OPEN) {
- rc = __ibmvnic_close(netdev);
- if (rc)
- goto out;
- }
-
- release_resources(adapter);
- release_sub_crqs(adapter, 1);
- release_crq_queue(adapter);
-
- adapter->state = VNIC_PROBED;
-
- rc = init_crq_queue(adapter);
-
- if (rc) {
- netdev_err(adapter->netdev,
- "Couldn't initialize crq. rc=%d\n", rc);
- return rc;
- }
-
- rc = ibmvnic_reset_init(adapter, true);
- if (rc) {
- rc = IBMVNIC_INIT_FAILED;
- goto out;
- }
-
- /* If the adapter was in PROBE state prior to the reset,
- * exit here.
- */
- if (reset_state == VNIC_PROBED)
- goto out;
-
- rc = ibmvnic_login(netdev);
- if (rc) {
- goto out;
- }
-
- rc = init_resources(adapter);
- if (rc)
- goto out;
-
- ibmvnic_disable_irqs(adapter);
-
- adapter->state = VNIC_CLOSED;
-
- if (reset_state == VNIC_CLOSED)
- return 0;
-
- rc = __ibmvnic_open(netdev);
- if (rc) {
- rc = IBMVNIC_OPEN_FAILED;
- goto out;
- }
-
- /* refresh device's multicast list */
- ibmvnic_set_multi(netdev);
-
- /* kick napi */
- for (i = 0; i < adapter->req_rx_queues; i++)
- napi_schedule(&adapter->napi[i]);
-
-out:
- if (rc)
- adapter->state = reset_state;
- return rc;
-}
-
-/**
+/*
* do_reset returns zero if we are able to keep processing reset events, or
* non-zero if we hit a fatal error and must halt.
*/
@@ -2028,9 +1916,12 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->state, adapter->failover_pending,
rwi->reset_reason, reset_state);
- rtnl_lock();
- /*
- * Now that we have the rtnl lock, clear any pending failover.
+ adapter->reset_reason = rwi->reset_reason;
+ /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_lock();
+
+ /* Now that we have the rtnl lock, clear any pending failover.
* This will ensure ibmvnic_open() has either completed or will
* block until failover is complete.
*/
@@ -2038,7 +1929,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
adapter->failover_pending = false;
netif_carrier_off(netdev);
- adapter->reset_reason = rwi->reset_reason;
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
@@ -2050,25 +1940,37 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (reset_state == VNIC_OPEN &&
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_FAILOVER) {
- adapter->state = VNIC_CLOSING;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = __ibmvnic_close(netdev);
+ if (rc)
+ goto out;
+ } else {
+ adapter->state = VNIC_CLOSING;
- /* Release the RTNL lock before link state change and
- * re-acquire after the link state change to allow
- * linkwatch_event to grab the RTNL lock and run during
- * a reset.
- */
- rtnl_unlock();
- rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
- rtnl_lock();
- if (rc)
- goto out;
+ /* Release the RTNL lock before link state change and
+ * re-acquire after the link state change to allow
+ * linkwatch_event to grab the RTNL lock and run during
+ * a reset.
+ */
+ rtnl_unlock();
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+ rtnl_lock();
+ if (rc)
+ goto out;
- if (adapter->state != VNIC_CLOSING) {
- rc = -1;
- goto out;
+ if (adapter->state != VNIC_CLOSING) {
+ rc = -1;
+ goto out;
+ }
+
+ adapter->state = VNIC_CLOSED;
}
+ }
- adapter->state = VNIC_CLOSED;
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ release_resources(adapter);
+ release_sub_crqs(adapter, 1);
+ release_crq_queue(adapter);
}
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
@@ -2077,7 +1979,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
- if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_crq_queue(adapter);
+ } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
rc = ibmvnic_reenable_crq_queue(adapter);
release_sub_crqs(adapter, 1);
} else {
@@ -2112,11 +2016,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
}
rc = ibmvnic_login(netdev);
- if (rc) {
+ if (rc)
goto out;
- }
- if (adapter->req_rx_queues != old_num_rx_queues ||
+ if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
+ rc = init_resources(adapter);
+ if (rc)
+ goto out;
+ } else if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues ||
adapter->req_rx_add_entries_per_subcrq !=
old_num_rx_slots ||
@@ -2138,14 +2045,14 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = reset_tx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
rc = reset_rx_pools(adapter);
if (rc) {
netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
- rc);
+ rc);
goto out;
}
}
@@ -2181,7 +2088,9 @@ out:
/* restore the adapter state if reset failed */
if (rc)
adapter->state = reset_state;
- rtnl_unlock();
+ /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
+ if (!(adapter->reset_reason == VNIC_RESET_CHANGE_PARAM))
+ rtnl_unlock();
netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
adapter->state, adapter->failover_pending, rc);
@@ -2312,12 +2221,8 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
- /* CHANGE_PARAM requestor holds rtnl_lock */
- rc = do_change_param_reset(adapter, rwi, reset_state);
- } else if (adapter->force_reset_recovery) {
- /*
- * Since we are doing a hard reset now, clear the
+ if (adapter->force_reset_recovery) {
+ /* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
* future MOBILITY or other resets.
*/
@@ -2389,8 +2294,9 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
unsigned long flags;
int ret;
- /*
- * If failover is pending don't schedule any other reset.
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+
+ /* If failover is pending don't schedule any other reset.
* Instead let the failover complete. If there is already a
* a failover reset scheduled, we will detect and drop the
* duplicate reset when walking the ->rwi_list below.
@@ -2405,18 +2311,16 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- ret = adapter->init_done_rc = EAGAIN;
+ adapter->init_done_rc = EAGAIN;
+ ret = EAGAIN;
goto err;
}
- spin_lock_irqsave(&adapter->rwi_lock, flags);
-
list_for_each(entry, &adapter->rwi_list) {
tmp = list_entry(entry, struct ibmvnic_rwi, list);
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
reason);
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
ret = EBUSY;
goto err;
}
@@ -2424,8 +2328,6 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
if (!rwi) {
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
- ibmvnic_close(netdev);
ret = ENOMEM;
goto err;
}
@@ -2438,12 +2340,17 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
}
rwi->reset_reason = reason;
list_add_tail(&rwi->list, &adapter->rwi_list);
- spin_unlock_irqrestore(&adapter->rwi_lock, flags);
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
schedule_work(&adapter->ibmvnic_reset);
- return 0;
+ ret = 0;
err:
+ /* ibmvnic_close() below can block, so drop the lock first */
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
+ if (ret == ENOMEM)
+ ibmvnic_close(netdev);
+
return -ret;
}
@@ -2511,16 +2418,9 @@ restart_poll:
if (!pending_scrq(adapter, rx_scrq))
break;
- /* The queue entry at the current index is peeked at above
- * to determine that there is a valid descriptor awaiting
- * processing. We want to be sure that the current slot
- * holds a valid descriptor before reading its contents.
- */
- dma_rmb();
next = ibmvnic_next_scrq(adapter, rx_scrq);
- rx_buff =
- (struct ibmvnic_rx_buff *)be64_to_cpu(next->
- rx_comp.correlator);
+ rx_buff = (struct ibmvnic_rx_buff *)
+ be64_to_cpu(next->rx_comp.correlator);
/* do error checking */
if (next->rx_comp.rc) {
netdev_dbg(netdev, "rx buffer returned with rc %x\n",
@@ -2541,6 +2441,8 @@ restart_poll:
offset = be16_to_cpu(next->rx_comp.off_frame_data);
flags = next->rx_comp.flags;
skb = rx_buff->skb;
+ /* load long_term_buff before copying to skb */
+ dma_rmb();
skb_copy_to_linear_data(skb, rx_buff->data + offset,
length);
@@ -2583,7 +2485,6 @@ restart_poll:
if (napi_complete_done(napi, frames_processed)) {
enable_scrq_irq(adapter, rx_scrq);
if (pending_scrq(adapter, rx_scrq)) {
- rmb();
if (napi_reschedule(napi)) {
disable_scrq_irq(adapter, rx_scrq);
goto restart_poll;
@@ -2712,9 +2613,9 @@ static void ibmvnic_get_drvinfo(struct net_device *netdev,
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
- strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
- strlcpy(info->fw_version, adapter->fw_version,
+ strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
+ strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
+ strscpy(info->fw_version, adapter->fw_version,
sizeof(info->fw_version));
}
@@ -2826,7 +2727,6 @@ static int ibmvnic_set_channels(struct net_device *netdev,
channels->rx_count, channels->tx_count,
adapter->req_rx_queues, adapter->req_tx_queues);
return ret;
-
}
static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -2915,8 +2815,8 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
return;
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
- data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
- ibmvnic_stats[i].offset));
+ data[i] = be64_to_cpu(IBMVNIC_GET_STAT
+ (adapter, ibmvnic_stats[i].offset));
for (j = 0; j < adapter->req_tx_queues; j++) {
data[i] = adapter->tx_stats_buffers[j].packets;
@@ -2956,6 +2856,7 @@ static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
return 0;
}
+
static const struct ethtool_ops ibmvnic_ethtool_ops = {
.get_drvinfo = ibmvnic_get_drvinfo,
.get_msglevel = ibmvnic_get_msglevel,
@@ -3225,7 +3126,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
/* H_EOI would fail with rc = H_FUNCTION when running
* in XIVE mode which is expected, but not an error.
*/
- if (rc && (rc != H_FUNCTION))
+ if (rc && rc != H_FUNCTION)
dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
val, rc);
}
@@ -3256,13 +3157,6 @@ restart_loop:
int total_bytes = 0;
int num_packets = 0;
- /* The queue entry at the current index is peeked at above
- * to determine that there is a valid descriptor awaiting
- * processing. We want to be sure that the current slot
- * holds a valid descriptor before reading its contents.
- */
- dma_rmb();
-
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
if (next->tx_comp.rcs[i])
@@ -3636,11 +3530,16 @@ static int pending_scrq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
union sub_crq *entry = &scrq->msgs[scrq->cur];
+ int rc;
- if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
- return 1;
- else
- return 0;
+ rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
+
+ /* Ensure that the SCRQ valid flag is loaded prior to loading the
+ * contents of the SCRQ descriptor
+ */
+ dma_rmb();
+
+ return rc;
}
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
@@ -3659,8 +3558,8 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
}
spin_unlock_irqrestore(&scrq->lock, flags);
- /* Ensure that the entire buffer descriptor has been
- * loaded before reading its contents
+ /* Ensure that the SCRQ valid flag is loaded prior to loading the
+ * contents of the SCRQ descriptor
*/
dma_rmb();
@@ -3710,7 +3609,7 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
int rc;
/* Make sure the hypervisor sees the complete request */
- mb();
+ dma_wmb();
rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
cpu_to_be64(remote_handle),
ioba, num_entries);
@@ -3730,8 +3629,8 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
int rc;
netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]));
+ (unsigned long)cpu_to_be64(u64_crq[0]),
+ (unsigned long)cpu_to_be64(u64_crq[1]));
if (!adapter->crq.active &&
crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
@@ -3740,7 +3639,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
}
/* Make sure the hypervisor sees the complete request */
- mb();
+ dma_wmb();
rc = plpar_hcall_norets(H_SEND_CRQ, ua,
cpu_to_be64(u64_crq[0]),
@@ -3936,15 +3835,15 @@ static int send_login(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_tx_queues; i++) {
if (adapter->tx_scrq[i]) {
- tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
- crq_num);
+ tx_list_p[i] =
+ cpu_to_be64(adapter->tx_scrq[i]->crq_num);
}
}
for (i = 0; i < adapter->req_rx_queues; i++) {
if (adapter->rx_scrq[i]) {
- rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
- crq_num);
+ rx_list_p[i] =
+ cpu_to_be64(adapter->rx_scrq[i]->crq_num);
}
}
@@ -3960,7 +3859,7 @@ static int send_login(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Login Buffer:\n");
for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(adapter->login_buf))[i]);
+ ((unsigned long *)(adapter->login_buf))[i]);
}
memset(&crq, 0, sizeof(crq));
@@ -4328,7 +4227,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(buf))[i]);
+ ((unsigned long *)(buf))[i]);
netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
@@ -4487,8 +4386,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
- (long int)be64_to_cpu(crq->request_capability_rsp.
- number), name);
+ (long)be64_to_cpu(crq->request_capability_rsp.number),
+ name);
if (be16_to_cpu(crq->request_capability_rsp.capability) ==
REQ_MTU) {
@@ -4558,7 +4457,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
- ((unsigned long int *)(adapter->login_rsp_buf))[i]);
+ ((unsigned long *)(adapter->login_rsp_buf))[i]);
}
/* Sanity checks */
@@ -4901,8 +4800,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
long rc;
netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
- (unsigned long int)cpu_to_be64(u64_crq[0]),
- (unsigned long int)cpu_to_be64(u64_crq[1]));
+ (unsigned long)cpu_to_be64(u64_crq[0]),
+ (unsigned long)cpu_to_be64(u64_crq[1]));
switch (gen_crq->first) {
case IBMVNIC_CRQ_INIT_RSP:
switch (gen_crq->cmd) {
@@ -4918,7 +4817,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ if (rc && rc != -EBUSY) {
+ /* We were unable to schedule the failover
+ * reset either because the adapter was still
+ * probing (eg: during kexec) or we could not
+ * allocate memory. Clear the failover_pending
+ * flag since no one else will. We ignore
+ * EBUSY because it means either FAILOVER reset
+ * is already scheduled or the adapter is
+ * being removed.
+ */
+ netdev_err(netdev,
+ "Error %ld scheduling failover reset\n",
+ rc);
+ adapter->failover_pending = false;
+ }
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -5361,8 +5275,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- spin_lock_init(&adapter->stats_lock);
-
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
__ibmvnic_delayed_reset);
@@ -5444,7 +5356,18 @@ static int ibmvnic_remove(struct vio_dev *dev)
unsigned long flags;
spin_lock_irqsave(&adapter->state_lock, flags);
+
+ /* If ibmvnic_reset() is scheduling a reset, wait for it to
+ * finish. Then, set the state to REMOVING to prevent it from
+ * scheduling any more work and to have reset functions ignore
+ * any resets that have already been scheduled. Drop the lock
+ * after setting state, so __ibmvnic_reset() which is called
+ * from the flush_work() below, can make progress.
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
adapter->state = VNIC_REMOVING;
+ spin_unlock_irqrestore(&adapter->rwi_lock, flags);
+
spin_unlock_irqrestore(&adapter->state_lock, flags);
flush_work(&adapter->ibmvnic_reset);
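
The barrier changes in this file pair a dma_wmb() before a descriptor is handed to VIOS with a dma_rmb() in pending_scrq() before its contents are read, i.e. the usual producer/consumer publish pattern. A hedged userspace analogue using C11 acquire/release atomics; the struct and function names are invented for the example, and the real driver uses DMA barriers rather than C11 atomics:

/* Illustrative analogue of the descriptor-ring ordering above: the
 * producer publishes the payload before the valid flag (dma_wmb() in
 * the driver), the consumer checks the flag before reading the payload
 * (dma_rmb() in the driver).
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
        uint64_t payload;
        atomic_uint valid;
};

static void producer(struct desc *d, uint64_t data)
{
        d->payload = data;
        /* release: payload is visible before valid is observed as set */
        atomic_store_explicit(&d->valid, 1, memory_order_release);
}

static int consumer(const struct desc *d, uint64_t *out)
{
        /* acquire: if valid is seen, the payload written before it is too */
        if (!atomic_load_explicit(&d->valid, memory_order_acquire))
                return 0;
        *out = d->payload;
        return 1;
}

int main(void)
{
        struct desc d = { 0 };
        uint64_t v;

        producer(&d, 42);
        if (consumer(&d, &v))
                printf("payload %llu\n", (unsigned long long)v);
        return 0;
}
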
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index c09c3f6bba9f..806aa75a4e86 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -31,7 +31,7 @@
#define IBMVNIC_BUFFS_PER_POOL 100
#define IBMVNIC_MAX_QUEUES 16
#define IBMVNIC_MAX_QUEUE_SZ 4096
-#define IBMVNIC_MAX_IND_DESCS 128
+#define IBMVNIC_MAX_IND_DESCS 16
#define IBMVNIC_IND_ARR_SZ (IBMVNIC_MAX_IND_DESCS * 32)
#define IBMVNIC_TSO_BUF_SZ 65536
@@ -845,6 +845,7 @@ struct ibmvnic_crq_queue {
union ibmvnic_crq *msgs;
int size, cur;
dma_addr_t msg_token;
+ /* Used for serialization of msgs, cur */
spinlock_t lock;
bool active;
char name[32];
@@ -876,6 +877,7 @@ struct ibmvnic_sub_crq_queue {
unsigned int irq;
unsigned int pool_index;
int scrq_num;
+ /* Used for serialization of msgs, cur */
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
@@ -985,7 +987,6 @@ struct ibmvnic_adapter {
struct ibmvnic_statistics stats;
dma_addr_t stats_token;
struct completion stats_done;
- spinlock_t stats_lock;
int replenish_no_mem;
int replenish_add_buff_success;
int replenish_add_buff_failure;
@@ -1080,9 +1081,16 @@ struct ibmvnic_adapter {
struct tasklet_struct tasklet;
enum vnic_state state;
+ /* Used for serialization of state field. When taking both state
+ * and rwi locks, take state lock first.
+ */
+ spinlock_t state_lock;
enum ibmvnic_reset_reason reset_reason;
- spinlock_t rwi_lock;
struct list_head rwi_list;
+ /* Used for serialization of rwi_list. When taking both state
+ * and rwi locks, take state lock first
+ */
+ spinlock_t rwi_lock;
struct work_struct ibmvnic_reset;
struct delayed_work ibmvnic_delayed_reset;
unsigned long resetting;
@@ -1096,7 +1104,4 @@ struct ibmvnic_adapter {
struct ibmvnic_tunables desired;
struct ibmvnic_tunables fallback;
-
- /* Used for serializatin of state field */
- spinlock_t state_lock;
};
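
The new comments in this header spell out a lock-ordering rule: whenever both the state and rwi locks are needed, the state lock is taken first. A small sketch of why one agreed order avoids deadlock, with pthread mutexes standing in for the kernel spinlocks and all names invented for the example:

/* Illustrative only: a single, consistent acquisition order means two
 * threads can never each hold one lock while waiting for the other.
 */
#include <pthread.h>
#include <stdio.h>

struct adapter_demo {
        pthread_mutex_t state_lock;     /* always taken first */
        pthread_mutex_t rwi_lock;       /* always taken second */
        int removing;
        int rwi_pending;
};

static struct adapter_demo demo = {
        .state_lock = PTHREAD_MUTEX_INITIALIZER,
        .rwi_lock = PTHREAD_MUTEX_INITIALIZER,
};

static void schedule_reset_demo(struct adapter_demo *a)
{
        /* every path honours the same order, so no deadlock is possible */
        pthread_mutex_lock(&a->state_lock);
        pthread_mutex_lock(&a->rwi_lock);

        if (!a->removing)
                a->rwi_pending = 1;

        pthread_mutex_unlock(&a->rwi_lock);
        pthread_mutex_unlock(&a->state_lock);
}

int main(void)
{
        schedule_reset_demo(&demo);
        printf("reset pending: %d\n", demo.rwi_pending);
        return 0;
}
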
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 8cc651d37a7f..f8d78af76d7d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1739,10 +1739,10 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
dma_addr_t dma_addr;
cb->command = nic->tx_command;
- dma_addr = pci_map_single(nic->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
/* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr)) {
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
dev_kfree_skb_any(skb);
skb = NULL;
return -ENOMEM;
@@ -1828,10 +1828,10 @@ static int e100_tx_clean(struct nic *nic)
dev->stats.tx_packets++;
dev->stats.tx_bytes += cb->skb->len;
- pci_unmap_single(nic->pdev,
- le32_to_cpu(cb->u.tcb.tbd.buf_addr),
- le16_to_cpu(cb->u.tcb.tbd.size),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&nic->pdev->dev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(cb->skb);
cb->skb = NULL;
tx_cleaned = 1;
@@ -1855,10 +1855,10 @@ static void e100_clean_cbs(struct nic *nic)
while (nic->cbs_avail != nic->params.cbs.count) {
struct cb *cb = nic->cb_to_clean;
if (cb->skb) {
- pci_unmap_single(nic->pdev,
- le32_to_cpu(cb->u.tcb.tbd.buf_addr),
- le16_to_cpu(cb->u.tcb.tbd.size),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&nic->pdev->dev,
+ le32_to_cpu(cb->u.tcb.tbd.buf_addr),
+ le16_to_cpu(cb->u.tcb.tbd.size),
+ DMA_TO_DEVICE);
dev_kfree_skb(cb->skb);
}
nic->cb_to_clean = nic->cb_to_clean->next;
@@ -1925,10 +1925,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
/* Init, and map the RFD. */
skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
- rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data,
+ RFD_BUF_LEN, DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
+ if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = NULL;
rx->dma_addr = 0;
@@ -1941,8 +1941,10 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
if (rx->prev->skb) {
struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
- pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ rx->prev->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
}
return 0;
@@ -1961,8 +1963,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
return -EAGAIN;
/* Need to sync before taking a peek at cb_complete bit */
- pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd), DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
@@ -1981,9 +1983,9 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
if (ioread8(&nic->csr->scb.status) & rus_no_res)
nic->ru_running = RU_SUSPENDED;
- pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
- sizeof(struct rfd),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_FROM_DEVICE);
return -ENODATA;
}
@@ -1995,8 +1997,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
actual_size = RFD_BUF_LEN - sizeof(struct rfd);
/* Get data */
- pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN,
+ DMA_BIDIRECTIONAL);
/* If this buffer has the el bit, but we think the receiver
* is still running, check to see if it really stopped while
@@ -2097,22 +2099,25 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
(struct rfd *)new_before_last_rx->skb->data;
new_before_last_rfd->size = 0;
new_before_last_rfd->command |= cpu_to_le16(cb_el);
- pci_dma_sync_single_for_device(nic->pdev,
- new_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ new_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
/* Now that we have a new stopping point, we can clear the old
* stopping point. We must sync twice to get the proper
* ordering on the hardware side of things. */
old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
- pci_dma_sync_single_for_device(nic->pdev,
- old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ old_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN
+ ETH_FCS_LEN);
- pci_dma_sync_single_for_device(nic->pdev,
- old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev,
+ old_before_last_rx->dma_addr,
+ sizeof(struct rfd),
+ DMA_BIDIRECTIONAL);
}
if (restart_required) {
@@ -2134,8 +2139,9 @@ static void e100_rx_clean_list(struct nic *nic)
if (nic->rxs) {
for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
if (rx->skb) {
- pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&nic->pdev->dev,
+ rx->dma_addr, RFD_BUF_LEN,
+ DMA_BIDIRECTIONAL);
dev_kfree_skb(rx->skb);
}
}
@@ -2177,8 +2183,8 @@ static int e100_rx_alloc_list(struct nic *nic)
before_last = (struct rfd *)rx->skb->data;
before_last->command |= cpu_to_le16(cb_el);
before_last->size = 0;
- pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
+ sizeof(struct rfd), DMA_BIDIRECTIONAL);
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
nic->ru_running = RU_SUSPENDED;
@@ -2377,8 +2383,8 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
msleep(10);
- pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
- RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr,
+ RFD_BUF_LEN, DMA_BIDIRECTIONAL);
if (memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
skb->data, ETH_DATA_LEN))
@@ -2751,16 +2757,16 @@ static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
static int e100_alloc(struct nic *nic)
{
- nic->mem = pci_alloc_consistent(nic->pdev, sizeof(struct mem),
- &nic->dma_addr);
+ nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem),
+ &nic->dma_addr, GFP_KERNEL);
return nic->mem ? 0 : -ENOMEM;
}
static void e100_free(struct nic *nic)
{
if (nic->mem) {
- pci_free_consistent(nic->pdev, sizeof(struct mem),
- nic->mem, nic->dma_addr);
+ dma_free_coherent(&nic->pdev->dev, sizeof(struct mem),
+ nic->mem, nic->dma_addr);
nic->mem = NULL;
}
}
@@ -2853,7 +2859,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_disable_pdev;
}
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
goto err_out_free_res;
}
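
The e100 hunks above convert the legacy pci_* DMA wrappers to the generic dma_* API. A minimal kernel-context sketch of the map/check/unmap pattern they switch to; xmit_one()/xmit_done() and their arguments are invented for illustration and are not part of the driver:

/* Sketch only (kernel context): map a TX buffer, bail out on a mapping
 * error so the stack can retry, and unmap once the device is done.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int xmit_one(struct device *dev, struct sk_buff *skb, dma_addr_t *out)
{
        dma_addr_t addr;

        /* map the linear data for device reads (TX direction) */
        addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM;

        *out = addr;
        return 0;
}

static void xmit_done(struct device *dev, dma_addr_t addr, unsigned int len)
{
        /* release the mapping once the device has consumed the buffer */
        dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
}
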
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5e28cf4fa2cd..042de276e632 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2632,7 +2632,6 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (unlikely(adapter->link_speed != SPEED_1000)) {
- current_itr = 0;
new_itr = 4000;
goto set_itr_now;
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 6fb46682b058..0ac8d79a7987 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -3886,13 +3886,6 @@ static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
if (ret_val)
goto release;
- /* And invalidate the previously valid segment by setting
- * its signature word (0x13) high_byte to 0b. This can be
- * done without an erase because flash erase sets all bits
- * to 1's. We can write 1's to 0's without an erase
- */
- act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
-
/* offset in words but we read dword */
act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 99b8252eb969..247f44f4cb30 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -194,17 +194,12 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
DMA_FROM_DEVICE);
}
-static inline bool fm10k_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
struct page *page,
unsigned int __maybe_unused truesize)
{
- /* avoid re-using remote pages */
- if (unlikely(fm10k_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -265,8 +260,8 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
if (likely(size <= FM10K_RX_HDR_LEN)) {
memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!fm10k_page_is_reserved(page)))
+ /* page is reusable, we can reuse buffer as-is */
+ if (dev_page_is_reusable(page))
return true;
/* this page cannot be reused so discard it */
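
The fm10k hunks above replace the driver-local "is reserved" test with the shared dev_page_is_reusable() helper. A kernel-context sketch of the equivalence, assuming dev_page_is_reusable() behaves as documented (same-node page and not pfmemalloc); old_is_reserved() is a made-up name for the removed logic:

/* Sketch only: the old negative check and the new positive helper give
 * the same answer, just inverted.
 */
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/topology.h>

static bool old_is_reserved(struct page *page)
{
        return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool can_reuse(struct page *page)
{
        /* equivalent to !old_is_reserved(page), via the common helper */
        return dev_page_is_reusable(page);
}
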
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 5c19ff452558..2fb52bd6fc0e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1531,8 +1531,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_get_vf_stats = fm10k_ndo_get_vf_stats,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
.ndo_features_check = fm10k_features_check,
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 118473dfdcbd..cd53981fa5e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_H_
#define _I40E_H_
@@ -213,14 +213,18 @@ struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
- u8 ip4_proto;
+ u8 ipl4_proto;
/* TX packet view of src and dst */
__be32 dst_ip;
__be32 src_ip;
+ __be32 dst_ip6[4];
+ __be32 src_ip6[4];
__be16 src_port;
__be16 dst_port;
__be32 sctp_v_tag;
+ __be16 vlan_etype;
+ __be16 vlan_tag;
/* Flexible data to match within the packet payload */
__be16 flex_word;
u16 flex_offset;
@@ -289,6 +293,9 @@ struct i40e_cloud_filter {
u8 tunnel_type;
};
+#define I40E_DCB_PRIO_TYPE_STRICT 0
+#define I40E_DCB_PRIO_TYPE_ETS 1
+#define I40E_DCB_STRICT_PRIO_CREDITS 127
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
@@ -474,6 +481,11 @@ struct i40e_pf {
u16 fd_sctp4_filter_cnt;
u16 fd_ip4_filter_cnt;
+ u16 fd_tcp6_filter_cnt;
+ u16 fd_udp6_filter_cnt;
+ u16 fd_sctp6_filter_cnt;
+ u16 fd_ip6_filter_cnt;
+
/* Flexible filter table values that need to be programmed into
* hardware, which expects L3 and L4 to be programmed separately. We
* need to ensure that the values are in ascended order and don't have
@@ -626,6 +638,8 @@ struct i40e_pf {
u16 dcbx_cap;
struct i40e_filter_control_settings filter_settings;
+ struct i40e_rx_pb_config pb_cfg; /* Current Rx packet buffer config */
+ struct i40e_dcbx_config tmp_cfg;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
@@ -1122,6 +1136,12 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
int i40e_count_filters(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
+static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
+{
+ return !!(pf->flags & I40E_FLAG_DISABLE_FW_LLDP);
+}
+
+void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable);
#ifdef CONFIG_I40E_DCB
void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
@@ -1131,6 +1151,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi);
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
struct i40e_dcbx_config *old_cfg,
struct i40e_dcbx_config *new_cfg);
+int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg);
+int i40e_dcb_sw_default_config(struct i40e_pf *pf);
#endif /* CONFIG_I40E_DCB */
void i40e_ptp_rx_hang(struct i40e_pf *pf);
void i40e_ptp_tx_hang(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 1e960c3c7ef0..ce626eace692 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_ADMINQ_CMD_H_
#define _I40E_ADMINQ_CMD_H_
@@ -1080,6 +1080,7 @@ struct i40e_aqc_add_remove_control_packet_filter {
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002
#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008
+#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000
__le16 seid;
__le16 queue;
u8 reserved[2];
@@ -2184,6 +2185,14 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
struct i40e_aqc_lldp_set_local_mib {
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
__le16 length;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index adc9e4fa4789..ec19e18305ec 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include "i40e.h"
#include "i40e_type.h"
@@ -3662,6 +3662,46 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
}
/**
+ * i40e_aq_set_lldp_mib - Set the LLDP MIB
+ * @hw: pointer to the hw struct
+ * @mib_type: Local, Remote or both Local and Remote MIBs
+ * @buff: pointer to a user supplied buffer to store the MIB block
+ * @buff_size: size of the buffer (in bytes)
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the LLDP MIB.
+ **/
+enum i40e_status_code
+i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_lldp_set_local_mib *cmd;
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+
+ cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
+ if (buff_size == 0 || !buff)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_lldp_set_local_mib);
+ /* Indirect Command */
+ desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ if (buff_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.datalen = cpu_to_le16(buff_size);
+
+ cmd->type = mib_type;
+ cmd->length = cpu_to_le16(buff_size);
+ cmd->address_high = cpu_to_le32(upper_32_bits((uintptr_t)buff));
+ cmd->address_low = cpu_to_le32(lower_32_bits((uintptr_t)buff));
+
+ status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
+ return status;
+}
+
+/**
* i40e_aq_cfg_lldp_mib_change_event
* @hw: pointer to the hw struct
* @enable_update: Enable or Disable event posting
@@ -4480,6 +4520,29 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
}
/**
+ * i40e_aq_suspend_port_tx
+ * @hw: pointer to the hardware structure
+ * @seid: port seid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Suspend port's Tx traffic
+ **/
+i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aqc_tx_sched_ind *cmd;
+ struct i40e_aq_desc desc;
+ i40e_status status;
+
+ cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
+ cmd->vsi_seid = cpu_to_le16(seid);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_resume_port_tx
* @hw: pointer to the hardware structure
* @cmd_details: pointer to command details structure or NULL
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 9de503c5f99b..243b0d2b7b72 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include "i40e_adminq.h"
#include "i40e_prototype.h"
@@ -933,6 +933,953 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
}
/**
+ * i40e_get_fw_lldp_status
+ * @hw: pointer to the hw struct
+ * @lldp_status: pointer to the status enum
+ *
+ * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+ * Status of agent is reported via @lldp_status parameter.
+ **/
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status)
+{
+ struct i40e_virt_mem mem;
+ i40e_status ret;
+ u8 *lldpmib;
+
+ if (!lldp_status)
+ return I40E_ERR_PARAM;
+
+ /* Allocate buffer for the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
+ I40E_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (!ret) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ /* MIB is not available yet but the agent is running */
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ ret = 0;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
+ ret = 0;
+ }
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
+ * @tlv: Fill the ETS config data in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS CFG TLV
+ **/
+static void i40e_add_ieee_ets_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 priority0, priority1, maxtcwilling = 0;
+ struct i40e_dcb_ets_config *etscfg;
+ u16 offset = 0, typelength, i;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First Octet post subtype
+ * --------------------------
+ * |will-|CBS | Re- | Max |
+ * |ing | |served| TCs |
+ * --------------------------
+ * |1bit | 1bit|3 bits|3bits|
+ */
+ etscfg = &dcbcfg->etscfg;
+ if (etscfg->willing)
+ maxtcwilling = BIT(I40E_IEEE_ETS_WILLING_SHIFT);
+ maxtcwilling |= etscfg->maxtcs & I40E_IEEE_ETS_MAXTC_MASK;
+ buf[offset] = maxtcwilling;
+
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etscfg->prioritytable[i * 2] & 0xF;
+ priority1 = etscfg->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etscfg->tsatable[i];
+}
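To make the priority-table packing above concrete, here is a minimal standalone sketch of the same nibble packing. It assumes I40E_IEEE_ETS_PRIO_1_SHIFT is 4 (which the 7..4/3..0 bit diagram implies) and uses arbitrary priority values; it is an illustration only, not driver code.

#include <stdio.h>

int main(void)
{
	/* Two 4-bit priorities per octet: even index in the high nibble,
	 * odd index in the low nibble, exactly as in the loop above.
	 */
	unsigned char pri[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned char buf[4];
	int i;

	for (i = 0; i < 4; i++)
		buf[i] = (unsigned char)(((pri[i * 2] & 0xF) << 4) |
					 (pri[i * 2 + 1] & 0xF));

	for (i = 0; i < 4; i++)
		printf("%02x ", buf[i]);	/* prints: 01 23 45 67 */
	printf("\n");
	return 0;
}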
+
+/**
+ * i40e_add_ieee_etsrec_tlv - Prepare ETS Recommended TLV in IEEE format
+ * @tlv: Fill ETS Recommended TLV in IEEE format
+ * @dcbcfg: Local store which holds the DCB Config
+ *
+ * Prepare IEEE 802.1Qaz ETS REC TLV
+ **/
+static void i40e_add_ieee_etsrec_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ struct i40e_dcb_ets_config *etsrec;
+ u16 offset = 0, typelength, i;
+ u8 priority0, priority1;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_ETS_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_ETS_REC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ etsrec = &dcbcfg->etsrec;
+ /* First Octet is reserved */
+ /* Move offset to Priority Assignment Table */
+ offset++;
+
+ /* Priority Assignment Table (4 octets)
+ * Octets:| 1 | 2 | 3 | 4 |
+ * -----------------------------------------
+ * |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
+ * -----------------------------------------
+ * Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
+ * -----------------------------------------
+ */
+ for (i = 0; i < 4; i++) {
+ priority0 = etsrec->prioritytable[i * 2] & 0xF;
+ priority1 = etsrec->prioritytable[i * 2 + 1] & 0xF;
+ buf[offset] = (priority0 << I40E_IEEE_ETS_PRIO_1_SHIFT) |
+ priority1;
+ offset++;
+ }
+
+ /* TC Bandwidth Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tcbwtable[i];
+
+ /* TSA Assignment Table (8 octets)
+ * Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
+ * ---------------------------------
+ * |tc0|tc1|tc2|tc3|tc4|tc5|tc6|tc7|
+ * ---------------------------------
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
+ buf[offset++] = etsrec->tsatable[i];
+}
+
+/**
+ * i40e_add_ieee_pfc_tlv - Prepare PFC TLV in IEEE format
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store to get PFC CFG data
+ *
+ * Prepare IEEE 802.1Qaz PFC CFG TLV
+ **/
+static void i40e_add_ieee_pfc_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelength;
+
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ I40E_IEEE_PFC_TLV_LENGTH);
+ tlv->typelength = htons(typelength);
+
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_PFC_CFG);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* ----------------------------------------
+ * |will-|MBC | Re- | PFC | PFC Enable |
+ * |ing | |served| cap | |
+ * -----------------------------------------
+ * |1bit | 1bit|2 bits|4bits| 1 octet |
+ */
+ if (dcbcfg->pfc.willing)
+ buf[0] = BIT(I40E_IEEE_PFC_WILLING_SHIFT);
+
+ if (dcbcfg->pfc.mbc)
+ buf[0] |= BIT(I40E_IEEE_PFC_MBC_SHIFT);
+
+ buf[0] |= dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcenable;
+}
+
+/**
+ * i40e_add_ieee_app_pri_tlv - Prepare APP TLV in IEEE format
+ * @tlv: Fill APP TLV in IEEE format
+ * @dcbcfg: Local store to get APP CFG data
+ *
+ * Prepare IEEE 802.1Qaz APP CFG TLV
+ **/
+static void i40e_add_ieee_app_pri_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 typelength, length, offset = 0;
+ u8 priority, selector, i = 0;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+
+ /* No APP TLVs then just return */
+ if (dcbcfg->numapps == 0)
+ return;
+ ouisubtype = (u32)((I40E_IEEE_8021QAZ_OUI << I40E_LLDP_TLV_OUI_SHIFT) |
+ I40E_IEEE_SUBTYPE_APP_PRI);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Move offset to App Priority Table */
+ offset++;
+ /* Application Priority Table (3 octets)
+ * Octets:| 1 | 2 | 3 |
+ * -----------------------------------------
+ * |Priority|Rsrvd| Sel | Protocol ID |
+ * -----------------------------------------
+ * Bits:|23 21|20 19|18 16|15 0|
+ * -----------------------------------------
+ */
+ while (i < dcbcfg->numapps) {
+ priority = dcbcfg->app[i].priority & 0x7;
+ selector = dcbcfg->app[i].selector & 0x7;
+ buf[offset] = (priority << I40E_IEEE_APP_PRIO_SHIFT) | selector;
+ buf[offset + 1] = (dcbcfg->app[i].protocolid >> 0x8) & 0xFF;
+ buf[offset + 2] = dcbcfg->app[i].protocolid & 0xFF;
+ /* Move to next app */
+ offset += 3;
+ i++;
+ if (i >= I40E_DCBX_MAX_APPS)
+ break;
+ }
+ /* length includes size of ouisubtype + 1 reserved + 3*numapps */
+ length = sizeof(tlv->ouisubtype) + 1 + (i * 3);
+ typelength = (u16)((I40E_TLV_TYPE_ORG << I40E_LLDP_TLV_TYPE_SHIFT) |
+ (length & 0x1FF));
+ tlv->typelength = htons(typelength);
+}
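As a concrete check of the length computation above: tlv->ouisubtype is written with htonl(), so sizeof(tlv->ouisubtype) is 4 octets; with a single APP entry the TLV payload length is therefore 4 + 1 + 3 = 8 octets, and each additional entry (capped at I40E_DCBX_MAX_APPS) adds another 3 octets.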
+
+/**
+ * i40e_add_dcb_tlv - Add all IEEE TLVs
+ * @tlv: pointer to org tlv
+ * @dcbcfg: pointer to modified dcbx config structure
+ * @tlvid: tlv id to be added
+ *
+ * Add TLV information for the given TLV ID
+ **/
+static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
+ struct i40e_dcbx_config *dcbcfg,
+ u16 tlvid)
+{
+ switch (tlvid) {
+ case I40E_IEEE_TLV_ID_ETS_CFG:
+ i40e_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_ETS_REC:
+ i40e_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_PFC_CFG:
+ i40e_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case I40E_IEEE_TLV_ID_APP_PRI:
+ i40e_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * i40e_set_dcb_config - Set the local LLDP MIB to FW
+ * @hw: pointer to the hw struct
+ *
+ * Set DCB configuration to the Firmware
+ **/
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+{
+ struct i40e_dcbx_config *dcbcfg;
+ struct i40e_virt_mem mem;
+ u8 mib_type, *lldpmib;
+ i40e_status ret;
+ u16 miblen;
+
+ /* update the hw local config */
+ dcbcfg = &hw->local_dcbx_config;
+ /* Allocate the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
+ if (ret)
+ return ret;
+
+ mib_type = SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB;
+ if (dcbcfg->app_mode == I40E_DCBX_APPS_NON_WILLING) {
+ mib_type |= SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS <<
+ SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT;
+ }
+ lldpmib = (u8 *)mem.va;
+ i40e_dcb_config_to_lldp(lldpmib, &miblen, dcbcfg);
+ ret = i40e_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, miblen, NULL);
+
+ i40e_free_virt_mem(hw, &mem);
+ return ret;
+}
+
+/**
+ * i40e_dcb_config_to_lldp - Convert DCB configuration to MIB format
+ * @lldpmib: pointer to the LLDP MIB buffer to be filled
+ * @miblen: pointer to u16 that receives the length of lldpmib
+ * @dcbcfg: DCB configuration to be converted
+ *
+ * Convert the DCB configuration to LLDP MIB format ready to be sent to FW
+ **/
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
+{
+ u16 length, offset = 0, tlvid, typelength;
+ struct i40e_lldp_org_tlv *tlv;
+
+ tlv = (struct i40e_lldp_org_tlv *)lldpmib;
+ tlvid = I40E_TLV_ID_START;
+ do {
+ i40e_add_dcb_tlv(tlv, dcbcfg, tlvid++);
+ typelength = ntohs(tlv->typelength);
+ length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
+ I40E_LLDP_TLV_LEN_SHIFT);
+ if (length)
+ offset += length + I40E_IEEE_TLV_HEADER_LENGTH;
+ /* END TLV or beyond LLDPDU size */
+ if (tlvid >= I40E_TLV_ID_END_OF_LLDPPDU ||
+ offset >= I40E_LLDPDU_SIZE)
+ break;
+ /* Move to next TLV */
+ if (length)
+ tlv = (struct i40e_lldp_org_tlv *)((char *)tlv +
+ sizeof(tlv->typelength) + length);
+ } while (tlvid < I40E_TLV_ID_END_OF_LLDPPDU);
+ *miblen = offset;
+ return I40E_SUCCESS;
+}
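A rough worked example of the walk above, assuming I40E_TLV_TYPE_ORG is the standard LLDP organizationally-specific type 127 and the usual 7-bit-type/9-bit-length header split encoded by the LEN mask and shift: the IEEE ETS CFG TLV carries typelength = (127 << 9) | 25 = 0xfe19, so the loop extracts a length of 25 and advances both offset and tlv by 25 + I40E_IEEE_TLV_HEADER_LENGTH (2) = 27 octets before handling the next TLV ID.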
+
+/**
+ * i40e_dcb_hw_rx_fifo_config
+ * @hw: pointer to the hw struct
+ * @ets_mode: Strict Priority or Round Robin mode
+ * @non_ets_mode: Strict Priority or Round Robin
+ * @max_exponent: Exponent to calculate max refill credits
+ * @lltc_map: Low latency TC bitmap
+ *
+ * Configure HW Rx FIFO as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
+ enum i40e_dcb_arbiter_mode ets_mode,
+ enum i40e_dcb_arbiter_mode non_ets_mode,
+ u32 max_exponent,
+ u8 lltc_map)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_RETSC);
+
+ reg &= ~I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+ reg |= ((u32)ets_mode << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT) &
+ I40E_PRTDCB_RETSC_ETS_MODE_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+ reg |= ((u32)non_ets_mode << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT) &
+ I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+ reg |= (max_exponent << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT) &
+ I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK;
+
+ reg &= ~I40E_PRTDCB_RETSC_LLTC_MASK;
+ reg |= (lltc_map << I40E_PRTDCB_RETSC_LLTC_SHIFT) &
+ I40E_PRTDCB_RETSC_LLTC_MASK;
+ wr32(hw, I40E_PRTDCB_RETSC, reg);
+}
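Every register update in this function (and in the DCB HW helpers that follow) uses the same clear-then-set idiom; the sketch below shows it in isolation with a hypothetical helper name, standing in for the open-coded rd32()/wr32() sequences.

#include <stdint.h>

/* Hypothetical helper (not part of the driver): clear the field with its
 * mask, then OR in the new value shifted into place and re-masked so an
 * oversized value cannot clobber neighbouring fields.
 */
static inline uint32_t reg_field_update(uint32_t reg, uint32_t mask,
					uint32_t shift, uint32_t val)
{
	reg &= ~mask;
	reg |= (val << shift) & mask;
	return reg;
}

With such a helper the ETS_MODE update above would read:
reg = reg_field_update(reg, I40E_PRTDCB_RETSC_ETS_MODE_MASK, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT, ets_mode);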
+
+/**
+ * i40e_dcb_hw_rx_cmd_monitor_config
+ * @hw: pointer to the hw struct
+ * @num_tc: Total number of traffic class
+ * @num_ports: Total number of ports on device
+ *
+ * Configure HW Rx command monitor as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
+ u8 num_tc, u8 num_ports)
+{
+ u32 threshold;
+ u32 fifo_size;
+ u32 reg;
+
+ /* Set the threshold and fifo_size based on number of ports */
+ switch (num_ports) {
+ case 1:
+ threshold = I40E_DCB_1_PORT_THRESHOLD;
+ fifo_size = I40E_DCB_1_PORT_FIFO_SIZE;
+ break;
+ case 2:
+ if (num_tc > 4) {
+ threshold = I40E_DCB_2_PORT_THRESHOLD_HIGH_NUM_TC;
+ fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_HIGH_NUM_TC;
+ } else {
+ threshold = I40E_DCB_2_PORT_THRESHOLD_LOW_NUM_TC;
+ fifo_size = I40E_DCB_2_PORT_FIFO_SIZE_LOW_NUM_TC;
+ }
+ break;
+ case 4:
+ if (num_tc > 4) {
+ threshold = I40E_DCB_4_PORT_THRESHOLD_HIGH_NUM_TC;
+ fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_HIGH_NUM_TC;
+ } else {
+ threshold = I40E_DCB_4_PORT_THRESHOLD_LOW_NUM_TC;
+ fifo_size = I40E_DCB_4_PORT_FIFO_SIZE_LOW_NUM_TC;
+ }
+ break;
+ default:
+ i40e_debug(hw, I40E_DEBUG_DCB, "Invalid num_ports %u.\n",
+ (u32)num_ports);
+ return;
+ }
+
+ /* The hardware manual describes setting up of I40E_PRT_SWR_PM_THR
+ * based on the number of ports and traffic classes for a given port as
+ * part of DCB configuration.
+ */
+ reg = rd32(hw, I40E_PRT_SWR_PM_THR);
+ reg &= ~I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ reg |= (threshold << I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT) &
+ I40E_PRT_SWR_PM_THR_THRESHOLD_MASK;
+ wr32(hw, I40E_PRT_SWR_PM_THR, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_RPPMC);
+ reg &= ~I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ reg |= (fifo_size << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT) &
+ I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK;
+ wr32(hw, I40E_PRTDCB_RPPMC, reg);
+}
+
+/**
+ * i40e_dcb_hw_pfc_config
+ * @hw: pointer to the hw struct
+ * @pfc_en: Bitmap of PFC enabled priorities
+ * @prio_tc: priority to tc assignment indexed by priority
+ *
+ * Configure HW Priority Flow Controller as part of DCB configuration.
+ **/
+void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
+ u8 pfc_en, u8 *prio_tc)
+{
+ u16 refresh_time = (u16)I40E_DEFAULT_PAUSE_TIME / 2;
+ u32 link_speed = hw->phy.link_info.link_speed;
+ u8 first_pfc_prio = 0;
+ u8 num_pfc_tc = 0;
+ u8 tc2pfc = 0;
+ u32 reg;
+ u8 i;
+
+ /* Get Number of PFC TCs and TC2PFC map */
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ if (pfc_en & BIT(i)) {
+ if (!first_pfc_prio)
+ first_pfc_prio = i;
+ /* Set bit for the PFC TC */
+ tc2pfc |= BIT(prio_tc[i]);
+ num_pfc_tc++;
+ }
+ }
+
+ switch (link_speed) {
+ case I40E_LINK_SPEED_10GB:
+ reg = rd32(hw, I40E_PRTDCB_MFLCN);
+ reg |= BIT(I40E_PRTDCB_MFLCN_DPF_SHIFT) &
+ I40E_PRTDCB_MFLCN_DPF_MASK;
+ reg &= ~I40E_PRTDCB_MFLCN_RFCE_MASK;
+ reg &= ~I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ if (pfc_en) {
+ reg |= BIT(I40E_PRTDCB_MFLCN_RPFCM_SHIFT) &
+ I40E_PRTDCB_MFLCN_RPFCM_MASK;
+ reg |= ((u32)pfc_en << I40E_PRTDCB_MFLCN_RPFCE_SHIFT) &
+ I40E_PRTDCB_MFLCN_RPFCE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_MFLCN, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_FCCFG);
+ reg &= ~I40E_PRTDCB_FCCFG_TFCE_MASK;
+ if (pfc_en)
+ reg |= (I40E_DCB_PFC_ENABLED <<
+ I40E_PRTDCB_FCCFG_TFCE_SHIFT) &
+ I40E_PRTDCB_FCCFG_TFCE_MASK;
+ wr32(hw, I40E_PRTDCB_FCCFG, reg);
+
+ /* FCTTV and FCRTV to be set by default */
+ break;
+ case I40E_LINK_SPEED_40GB:
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK;
+ reg |= BIT(I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ reg |= ((u32)pfc_en <<
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE, reg);
+
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE);
+ reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ reg |= ((u32)pfc_en <<
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK;
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE, reg);
+
+ for (i = 0; i < I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX; i++) {
+ reg = rd32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i));
+ reg &= ~I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ if (pfc_en) {
+ reg |= ((u32)refresh_time <<
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT) &
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK;
+ }
+ wr32(hw, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(i), reg);
+ }
+ /* PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA default value is 0xFFFF
+ * for all user priorities
+ */
+ break;
+ }
+
+ reg = rd32(hw, I40E_PRTDCB_TC2PFC);
+ reg &= ~I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ reg |= ((u32)tc2pfc << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT) &
+ I40E_PRTDCB_TC2PFC_TC2PFC_MASK;
+ wr32(hw, I40E_PRTDCB_TC2PFC, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_RUP);
+ reg &= ~I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ reg |= ((u32)first_pfc_prio << I40E_PRTDCB_RUP_NOVLANUP_SHIFT) &
+ I40E_PRTDCB_RUP_NOVLANUP_MASK;
+ wr32(hw, I40E_PRTDCB_RUP, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_TDPMC);
+ reg &= ~I40E_PRTDCB_TDPMC_TCPM_MODE_MASK;
+ if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) {
+ reg |= BIT(I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT) &
+ I40E_PRTDCB_TDPMC_TCPM_MODE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_TDPMC, reg);
+
+ reg = rd32(hw, I40E_PRTDCB_TCPMC);
+ reg &= ~I40E_PRTDCB_TCPMC_TCPM_MODE_MASK;
+ if (num_pfc_tc > I40E_DCB_PFC_FORCED_NUM_TC) {
+ reg |= BIT(I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT) &
+ I40E_PRTDCB_TCPMC_TCPM_MODE_MASK;
+ }
+ wr32(hw, I40E_PRTDCB_TCPMC, reg);
+}
+
+/**
+ * i40e_dcb_hw_set_num_tc
+ * @hw: pointer to the hw struct
+ * @num_tc: number of traffic classes
+ *
+ * Configure number of traffic classes in HW
+ **/
+void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_GENC);
+
+ reg &= ~I40E_PRTDCB_GENC_NUMTC_MASK;
+ reg |= ((u32)num_tc << I40E_PRTDCB_GENC_NUMTC_SHIFT) &
+ I40E_PRTDCB_GENC_NUMTC_MASK;
+ wr32(hw, I40E_PRTDCB_GENC, reg);
+}
+
+/**
+ * i40e_dcb_hw_get_num_tc
+ * @hw: pointer to the hw struct
+ *
+ * Returns number of traffic classes configured in HW
+ **/
+u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_GENC);
+
+ return (u8)((reg & I40E_PRTDCB_GENC_NUMTC_MASK) >>
+ I40E_PRTDCB_GENC_NUMTC_SHIFT);
+}
+
+/**
+ * i40e_dcb_hw_rx_ets_bw_config
+ * @hw: pointer to the hw struct
+ * @bw_share: Bandwidth share indexed per traffic class
+ * @mode: Strict Priority or Round Robin mode between UP sharing same
+ * traffic class
+ * @prio_type: TC is ETS enabled or strict priority
+ *
+ * Configure HW Rx ETS bandwidth as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
+ u8 *mode, u8 *prio_type)
+{
+ u32 reg;
+ u8 i;
+
+ for (i = 0; i <= I40E_PRTDCB_RETSTCC_MAX_INDEX; i++) {
+ reg = rd32(hw, I40E_PRTDCB_RETSTCC(i));
+ reg &= ~(I40E_PRTDCB_RETSTCC_BWSHARE_MASK |
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK |
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK);
+ reg |= ((u32)bw_share[i] << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_BWSHARE_MASK;
+ reg |= ((u32)mode[i] << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT) &
+ I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK;
+ reg |= ((u32)prio_type[i] << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT) &
+ I40E_PRTDCB_RETSTCC_ETSTC_MASK;
+ wr32(hw, I40E_PRTDCB_RETSTCC(i), reg);
+ }
+}
+
+/**
+ * i40e_dcb_hw_rx_up2tc_config
+ * @hw: pointer to the hw struct
+ * @prio_tc: priority to tc assignment indexed by priority
+ *
+ * Configure HW Rx UP2TC map as part of DCB configuration.
+ **/
+void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc)
+{
+ u32 reg = rd32(hw, I40E_PRTDCB_RUP2TC);
+#define I40E_UP2TC_REG(val, i) \
+ (((val) << I40E_PRTDCB_RUP2TC_UP##i##TC_SHIFT) & \
+ I40E_PRTDCB_RUP2TC_UP##i##TC_MASK)
+
+ reg |= I40E_UP2TC_REG(prio_tc[0], 0);
+ reg |= I40E_UP2TC_REG(prio_tc[1], 1);
+ reg |= I40E_UP2TC_REG(prio_tc[2], 2);
+ reg |= I40E_UP2TC_REG(prio_tc[3], 3);
+ reg |= I40E_UP2TC_REG(prio_tc[4], 4);
+ reg |= I40E_UP2TC_REG(prio_tc[5], 5);
+ reg |= I40E_UP2TC_REG(prio_tc[6], 6);
+ reg |= I40E_UP2TC_REG(prio_tc[7], 7);
+
+ wr32(hw, I40E_PRTDCB_RUP2TC, reg);
+}
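For clarity, one expansion of the token-pasting macro above: with prio_tc[1] == 2, I40E_UP2TC_REG(prio_tc[1], 1) expands to ((2) << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT) & I40E_PRTDCB_RUP2TC_UP1TC_MASK, i.e. the ## concatenation on the index picks the matching per-priority shift/mask pair.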
+
+/**
+ * i40e_dcb_hw_calculate_pool_sizes - configure dcb pool sizes
+ * @hw: pointer to the hw struct
+ * @num_ports: Number of available ports on the device
+ * @eee_enabled: EEE enabled for the given port
+ * @pfc_en: Bit map of PFC enabled traffic classes
+ * @mfs_tc: Array of max frame size for each traffic class
+ * @pb_cfg: pointer to packet buffer configuration
+ *
+ * Calculate the shared and dedicated per TC pool sizes,
+ * watermarks and threshold values.
+ **/
+void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ u8 num_ports, bool eee_enabled,
+ u8 pfc_en, u32 *mfs_tc,
+ struct i40e_rx_pb_config *pb_cfg)
+{
+ u32 pool_size[I40E_MAX_TRAFFIC_CLASS];
+ u32 high_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 low_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 total_pool_size = 0;
+ int shared_pool_size; /* Need signed variable */
+ u32 port_pb_size;
+ u32 mfs_max = 0;
+ u32 pcirtt;
+ u8 i;
+
+ /* Get the MFS(max) for the port */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (mfs_tc[i] > mfs_max)
+ mfs_max = mfs_tc[i];
+ }
+
+ pcirtt = I40E_BT2B(I40E_PCIRTT_LINK_SPEED_10G);
+
+ /* Calculate effective Rx PB size per port */
+ port_pb_size = I40E_DEVICE_RPB_SIZE / num_ports;
+ if (eee_enabled)
+ port_pb_size -= I40E_BT2B(I40E_EEE_TX_LPI_EXIT_TIME);
+ port_pb_size -= mfs_max;
+
+ /* Step 1 Calculating tc pool/shared pool sizes and watermarks */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (pfc_en & BIT(i)) {
+ low_wm[i] = (I40E_DCB_WATERMARK_START_FACTOR *
+ mfs_tc[i]) + pcirtt;
+ high_wm[i] = low_wm[i];
+ high_wm[i] += ((mfs_max > I40E_MAX_FRAME_SIZE)
+ ? mfs_max : I40E_MAX_FRAME_SIZE);
+ pool_size[i] = high_wm[i];
+ pool_size[i] += I40E_BT2B(I40E_STD_DV_TC(mfs_max,
+ mfs_tc[i]));
+ } else {
+ low_wm[i] = 0;
+ pool_size[i] = (I40E_DCB_WATERMARK_START_FACTOR *
+ mfs_tc[i]) + pcirtt;
+ high_wm[i] = pool_size[i];
+ }
+ total_pool_size += pool_size[i];
+ }
+
+ shared_pool_size = port_pb_size - total_pool_size;
+ if (shared_pool_size > 0) {
+ pb_cfg->shared_pool_size = shared_pool_size;
+ pb_cfg->shared_pool_high_wm = shared_pool_size;
+ pb_cfg->shared_pool_low_wm = 0;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pb_cfg->shared_pool_low_thresh[i] = 0;
+ pb_cfg->shared_pool_high_thresh[i] = shared_pool_size;
+ pb_cfg->tc_pool_size[i] = pool_size[i];
+ pb_cfg->tc_pool_high_wm[i] = high_wm[i];
+ pb_cfg->tc_pool_low_wm[i] = low_wm[i];
+ }
+
+ } else {
+ i40e_debug(hw, I40E_DEBUG_DCB,
+ "The shared pool size for the port is negative %d.\n",
+ shared_pool_size);
+ }
+}
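A rough worked example of the PFC branch above, using the defines added to i40e_dcb.h later in this patch and assuming mfs_tc[i] = mfs_max = 1522 bytes: pcirtt = I40E_BT2B(20000) = 2500 bytes, so low_wm = 2 * 1522 + 2500 = 5544; high_wm = 5544 + 4608 (I40E_MAX_FRAME_SIZE, since mfs_max is smaller) = 10152; I40E_STD_DV_TC(1522, 1522) works out to 154584 bit times, i.e. I40E_BT2B(154584) = 19323 bytes, giving pool_size = 10152 + 19323 = 29475 bytes for that TC.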
+
+/**
+ * i40e_dcb_hw_rx_pb_config
+ * @hw: pointer to the hw struct
+ * @old_pb_cfg: Existing Rx Packet buffer configuration
+ * @new_pb_cfg: New Rx Packet buffer configuration
+ *
+ * Program the Rx Packet Buffer registers.
+ **/
+void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ struct i40e_rx_pb_config *old_pb_cfg,
+ struct i40e_rx_pb_config *new_pb_cfg)
+{
+ u32 old_val;
+ u32 new_val;
+ u32 reg;
+ u8 i;
+
+ /* The Rx Packet buffer register programming needs to be done in a
+ * certain order and the following code is based on that
+ * requirement.
+ */
+
+ /* Program the shared pool low water mark per port if decreasing */
+ old_val = old_pb_cfg->shared_pool_low_wm;
+ new_val = new_pb_cfg->shared_pool_low_wm;
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLW);
+ reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
+ I40E_PRTRPB_SLW_SLW_MASK;
+ wr32(hw, I40E_PRTRPB_SLW, reg);
+ }
+
+ /* Program the shared pool low threshold and tc pool
+ * low water mark per TC that are decreasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_low_thresh[i];
+ new_val = new_pb_cfg->shared_pool_low_thresh[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLT(i));
+ reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
+ I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SLT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_low_wm[i];
+ new_val = new_pb_cfg->tc_pool_low_wm[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DLW(i));
+ reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
+ I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DLW(i), reg);
+ }
+ }
+
+ /* Program the shared pool high water mark per port if decreasing */
+ old_val = old_pb_cfg->shared_pool_high_wm;
+ new_val = new_pb_cfg->shared_pool_high_wm;
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHW);
+ reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
+ I40E_PRTRPB_SHW_SHW_MASK;
+ wr32(hw, I40E_PRTRPB_SHW, reg);
+ }
+
+ /* Program the shared pool high threshold and tc pool
+ * high water mark per TC that are decreasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_high_thresh[i];
+ new_val = new_pb_cfg->shared_pool_high_thresh[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHT(i));
+ reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
+ I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SHT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_high_wm[i];
+ new_val = new_pb_cfg->tc_pool_high_wm[i];
+ if (new_val < old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DHW(i));
+ reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
+ I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DHW(i), reg);
+ }
+ }
+
+ /* Write Dedicated Pool Sizes per TC */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ new_val = new_pb_cfg->tc_pool_size[i];
+ reg = rd32(hw, I40E_PRTRPB_DPS(i));
+ reg &= ~I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DPS_DPS_TCN_SHIFT) &
+ I40E_PRTRPB_DPS_DPS_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DPS(i), reg);
+ }
+
+ /* Write Shared Pool Size per port */
+ new_val = new_pb_cfg->shared_pool_size;
+ reg = rd32(hw, I40E_PRTRPB_SPS);
+ reg &= ~I40E_PRTRPB_SPS_SPS_MASK;
+ reg |= (new_val << I40E_PRTRPB_SPS_SPS_SHIFT) &
+ I40E_PRTRPB_SPS_SPS_MASK;
+ wr32(hw, I40E_PRTRPB_SPS, reg);
+
+ /* Program the shared pool low water mark per port if increasing */
+ old_val = old_pb_cfg->shared_pool_low_wm;
+ new_val = new_pb_cfg->shared_pool_low_wm;
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLW);
+ reg &= ~I40E_PRTRPB_SLW_SLW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLW_SLW_SHIFT) &
+ I40E_PRTRPB_SLW_SLW_MASK;
+ wr32(hw, I40E_PRTRPB_SLW, reg);
+ }
+
+ /* Program the shared pool low threshold and tc pool
+ * low water mark per TC that are increasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_low_thresh[i];
+ new_val = new_pb_cfg->shared_pool_low_thresh[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SLT(i));
+ reg &= ~I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SLT_SLT_TCN_SHIFT) &
+ I40E_PRTRPB_SLT_SLT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SLT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_low_wm[i];
+ new_val = new_pb_cfg->tc_pool_low_wm[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DLW(i));
+ reg &= ~I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DLW_DLW_TCN_SHIFT) &
+ I40E_PRTRPB_DLW_DLW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DLW(i), reg);
+ }
+ }
+
+ /* Program the shared pool high water mark per port if increasing */
+ old_val = old_pb_cfg->shared_pool_high_wm;
+ new_val = new_pb_cfg->shared_pool_high_wm;
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHW);
+ reg &= ~I40E_PRTRPB_SHW_SHW_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHW_SHW_SHIFT) &
+ I40E_PRTRPB_SHW_SHW_MASK;
+ wr32(hw, I40E_PRTRPB_SHW, reg);
+ }
+
+ /* Program the shared pool high threshold and tc pool
+ * high water mark per TC that are increasing.
+ */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ old_val = old_pb_cfg->shared_pool_high_thresh[i];
+ new_val = new_pb_cfg->shared_pool_high_thresh[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_SHT(i));
+ reg &= ~I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_SHT_SHT_TCN_SHIFT) &
+ I40E_PRTRPB_SHT_SHT_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_SHT(i), reg);
+ }
+
+ old_val = old_pb_cfg->tc_pool_high_wm[i];
+ new_val = new_pb_cfg->tc_pool_high_wm[i];
+ if (new_val > old_val) {
+ reg = rd32(hw, I40E_PRTRPB_DHW(i));
+ reg &= ~I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ reg |= (new_val << I40E_PRTRPB_DHW_DHW_TCN_SHIFT) &
+ I40E_PRTRPB_DHW_DHW_TCN_MASK;
+ wr32(hw, I40E_PRTRPB_DHW(i), reg);
+ }
+ }
+}
+
+/**
* _i40e_read_lldp_cfg - generic read of LLDP Configuration data from NVM
* @hw: pointer to the HW structure
* @lldp_cfg: pointer to hold lldp configuration variables
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2b1a2e81ac73..2370ceecb061 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_DCB_H_
#define _I40E_DCB_H_
#include "i40e_type.h"
+#define I40E_DCBX_STATUS_NOT_STARTED 0
#define I40E_DCBX_STATUS_IN_PROGRESS 1
#define I40E_DCBX_STATUS_DONE 2
+#define I40E_DCBX_STATUS_MULTIPLE_PEERS 3
#define I40E_DCBX_STATUS_DISABLED 7
#define I40E_TLV_TYPE_END 0
@@ -22,6 +24,7 @@
#define I40E_CEE_DCBX_OUI 0x001b21
#define I40E_CEE_DCBX_TYPE 2
+#define I40E_CEE_SUBTYPE_CTRL 1
#define I40E_CEE_SUBTYPE_PG_CFG 2
#define I40E_CEE_SUBTYPE_PFC_CFG 3
#define I40E_CEE_SUBTYPE_APP_PRI 4
@@ -64,6 +67,8 @@
#define I40E_IEEE_TSA_ETS 2
/* Defines for IEEE PFC TLV */
+#define I40E_DCB_PFC_ENABLED 2
+#define I40E_DCB_PFC_FORCED_NUM_TC 2
#define I40E_IEEE_PFC_CAP_SHIFT 0
#define I40E_IEEE_PFC_CAP_MASK (0xF << I40E_IEEE_PFC_CAP_SHIFT)
#define I40E_IEEE_PFC_MBC_SHIFT 6
@@ -77,9 +82,30 @@
#define I40E_IEEE_APP_PRIO_SHIFT 5
#define I40E_IEEE_APP_PRIO_MASK (0x7 << I40E_IEEE_APP_PRIO_SHIFT)
+/* TLV definitions for preparing MIB */
+#define I40E_TLV_ID_CHASSIS_ID 0
+#define I40E_TLV_ID_PORT_ID 1
+#define I40E_TLV_ID_TIME_TO_LIVE 2
+#define I40E_IEEE_TLV_ID_ETS_CFG 3
+#define I40E_IEEE_TLV_ID_ETS_REC 4
+#define I40E_IEEE_TLV_ID_PFC_CFG 5
+#define I40E_IEEE_TLV_ID_APP_PRI 6
+#define I40E_TLV_ID_END_OF_LLDPPDU 7
+#define I40E_TLV_ID_START I40E_IEEE_TLV_ID_ETS_CFG
-#pragma pack(1)
+#define I40E_IEEE_TLV_HEADER_LENGTH 2
+#define I40E_IEEE_ETS_TLV_LENGTH 25
+#define I40E_IEEE_PFC_TLV_LENGTH 6
+#define I40E_IEEE_APP_TLV_LENGTH 11
+
+/* Defines for default SW DCB config */
+#define I40E_IEEE_DEFAULT_ETS_TCBW 100
+#define I40E_IEEE_DEFAULT_ETS_WILLING 1
+#define I40E_IEEE_DEFAULT_PFC_WILLING 1
+#define I40E_IEEE_DEFAULT_NUM_APPS 1
+#define I40E_IEEE_DEFAULT_APP_PRIO 3
+#pragma pack(1)
/* IEEE 802.1AB LLDP Organization specific TLV */
struct i40e_lldp_org_tlv {
__be16 typelength;
@@ -102,7 +128,9 @@ struct i40e_cee_ctrl_tlv {
struct i40e_cee_feat_tlv {
struct i40e_cee_tlv_hdr hdr;
u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
+#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
+#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
u8 subtype;
u8 tlvinfo[1];
};
@@ -116,13 +144,140 @@ struct i40e_cee_app_prio {
};
#pragma pack()
+enum i40e_get_fw_lldp_status_resp {
+ I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
+ I40E_GET_FW_LLDP_STATUS_ENABLED = 1
+};
+
+/* Data structures to pass for SW DCBX */
+struct i40e_rx_pb_config {
+ u32 shared_pool_size;
+ u32 shared_pool_high_wm;
+ u32 shared_pool_low_wm;
+ u32 shared_pool_high_thresh[I40E_MAX_TRAFFIC_CLASS];
+ u32 shared_pool_low_thresh[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_size[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_high_wm[I40E_MAX_TRAFFIC_CLASS];
+ u32 tc_pool_low_wm[I40E_MAX_TRAFFIC_CLASS];
+};
+
+enum i40e_dcb_arbiter_mode {
+ I40E_DCB_ARB_MODE_STRICT_PRIORITY = 0,
+ I40E_DCB_ARB_MODE_ROUND_ROBIN = 1
+};
+
+#define I40E_DCB_DEFAULT_MAX_EXPONENT 0xB
+#define I40E_DEFAULT_PAUSE_TIME 0xffff
+#define I40E_MAX_FRAME_SIZE 4608 /* 4.5 KB */
+
+#define I40E_DEVICE_RPB_SIZE 968000 /* 968 KB */
+
+/* BitTimes (BT) conversion */
+#define I40E_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024))
+#define I40E_B2BT(BT) ((BT) * 8)
+#define I40E_BT2B(BT) (((BT) + (8 - 1)) / 8)
+
+/* Max Frame(TC) = MFS(max) + MFS(TC) */
+#define I40E_MAX_FRAME_TC(mfs_max, mfs_tc) I40E_B2BT((mfs_max) + (mfs_tc))
+
+/* EEE Tx LPI Exit time in Bit Times */
+#define I40E_EEE_TX_LPI_EXIT_TIME 142500
+
+/* PCI Round Trip Time in Bit Times */
+#define I40E_PCIRTT_LINK_SPEED_10G 20000
+#define I40E_PCIRTT_BYTE_LINK_SPEED_20G 40000
+#define I40E_PCIRTT_BYTE_LINK_SPEED_40G 80000
+
+/* PFC Frame Delay Bit Times */
+#define I40E_PFC_FRAME_DELAY 672
+
+/* Worst case Cable (10GBase-T) Delay Bit Times */
+#define I40E_CABLE_DELAY 5556
+
+/* Higher Layer Delay @10G Bit Times */
+#define I40E_HIGHER_LAYER_DELAY_10G 6144
+
+/* Interface Delays in Bit Times */
+/* TODO: Add for other link speeds 20G/40G/etc. */
+#define I40E_INTERFACE_DELAY_10G_MAC_CONTROL 8192
+#define I40E_INTERFACE_DELAY_10G_MAC 8192
+#define I40E_INTERFACE_DELAY_10G_RS 8192
+
+#define I40E_INTERFACE_DELAY_XGXS 2048
+#define I40E_INTERFACE_DELAY_XAUI 2048
+
+#define I40E_INTERFACE_DELAY_10G_BASEX_PCS 2048
+#define I40E_INTERFACE_DELAY_10G_BASER_PCS 3584
+#define I40E_INTERFACE_DELAY_LX4_PMD 512
+#define I40E_INTERFACE_DELAY_CX4_PMD 512
+#define I40E_INTERFACE_DELAY_SERIAL_PMA 512
+#define I40E_INTERFACE_DELAY_PMD 512
+
+#define I40E_INTERFACE_DELAY_10G_BASET 25600
+
+/* Hardware RX DCB config related defines */
+#define I40E_DCB_1_PORT_THRESHOLD 0xF
+#define I40E_DCB_1_PORT_FIFO_SIZE 0x10
+#define I40E_DCB_2_PORT_THRESHOLD_LOW_NUM_TC 0xF
+#define I40E_DCB_2_PORT_FIFO_SIZE_LOW_NUM_TC 0x10
+#define I40E_DCB_2_PORT_THRESHOLD_HIGH_NUM_TC 0xC
+#define I40E_DCB_2_PORT_FIFO_SIZE_HIGH_NUM_TC 0x8
+#define I40E_DCB_4_PORT_THRESHOLD_LOW_NUM_TC 0x9
+#define I40E_DCB_4_PORT_FIFO_SIZE_LOW_NUM_TC 0x8
+#define I40E_DCB_4_PORT_THRESHOLD_HIGH_NUM_TC 0x6
+#define I40E_DCB_4_PORT_FIFO_SIZE_HIGH_NUM_TC 0x4
+#define I40E_DCB_WATERMARK_START_FACTOR 0x2
+
+/* delay values for 10G BaseT in Bit Times */
+#define I40E_INTERFACE_DELAY_10G_COPPER \
+ (I40E_INTERFACE_DELAY_10G_MAC + (2 * I40E_INTERFACE_DELAY_XAUI) \
+ + I40E_INTERFACE_DELAY_10G_BASET)
+#define I40E_DV_TC(mfs_max, mfs_tc) \
+ ((2 * I40E_MAX_FRAME_TC(mfs_max, mfs_tc)) \
+ + I40E_PFC_FRAME_DELAY \
+ + (2 * I40E_CABLE_DELAY) \
+ + (2 * I40E_INTERFACE_DELAY_10G_COPPER) \
+ + I40E_HIGHER_LAYER_DELAY_10G)
+static inline u32 I40E_STD_DV_TC(u32 mfs_max, u32 mfs_tc)
+{
+ return I40E_DV_TC(mfs_max, mfs_tc) + I40E_B2BT(mfs_max);
+}
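A few sanity values for the conversion macros above; plain arithmetic, nothing beyond the defines in this header is assumed.

/* I40E_B2BT(1522)   == 12176 : 1522 bytes expressed in bit times
 * I40E_BT2B(12176)  == 1522  : back to bytes (integer ceiling of /8)
 * I40E_BT2KB(12176) == 2     : bit times to KB, rounded up
 */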
+
+/* APIs for SW DCBX */
+void i40e_dcb_hw_rx_fifo_config(struct i40e_hw *hw,
+ enum i40e_dcb_arbiter_mode ets_mode,
+ enum i40e_dcb_arbiter_mode non_ets_mode,
+ u32 max_exponent, u8 lltc_map);
+void i40e_dcb_hw_rx_cmd_monitor_config(struct i40e_hw *hw,
+ u8 num_tc, u8 num_ports);
+void i40e_dcb_hw_pfc_config(struct i40e_hw *hw,
+ u8 pfc_en, u8 *prio_tc);
+void i40e_dcb_hw_set_num_tc(struct i40e_hw *hw, u8 num_tc);
+u8 i40e_dcb_hw_get_num_tc(struct i40e_hw *hw);
+void i40e_dcb_hw_rx_ets_bw_config(struct i40e_hw *hw, u8 *bw_share,
+ u8 *mode, u8 *prio_type);
+void i40e_dcb_hw_rx_up2tc_config(struct i40e_hw *hw, u8 *prio_tc);
+void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
+ u8 num_ports, bool eee_enabled,
+ u8 pfc_en, u32 *mfs_tc,
+ struct i40e_rx_pb_config *pb_cfg);
+void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
+ struct i40e_rx_pb_config *old_pb_cfg,
+ struct i40e_rx_pb_config *new_pb_cfg);
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
- u16 *status);
+ u16 *status);
i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg);
+ struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg);
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change);
+i40e_status i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status);
+i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
+i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 9deae9a35423..0345132a0ef5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifdef CONFIG_I40E_DCB
#include "i40e.h"
#include <net/dcbnl.h>
+#define I40E_DCBNL_STATUS_SUCCESS 0
+#define I40E_DCBNL_STATUS_ERROR 1
+static bool i40e_dcbnl_find_app(struct i40e_dcbx_config *cfg,
+ struct i40e_dcb_app_priority_table *app);
/**
* i40e_get_pfc_delay - retrieve PFC Link Delay
* @hw: pointer to hardware struct
@@ -33,14 +37,13 @@ static int i40e_dcbnl_ieee_getets(struct net_device *dev,
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
struct i40e_dcbx_config *dcbxcfg;
- struct i40e_hw *hw = &pf->hw;
if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
- dcbxcfg = &hw->local_dcbx_config;
+ dcbxcfg = &pf->hw.local_dcbx_config;
ets->willing = dcbxcfg->etscfg.willing;
- ets->ets_cap = dcbxcfg->etscfg.maxtcs;
+ ets->ets_cap = I40E_MAX_TRAFFIC_CLASS;
ets->cbs = dcbxcfg->etscfg.cbs;
memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable,
sizeof(ets->tc_tx_bw));
@@ -84,7 +87,7 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
pfc->mbc = dcbxcfg->pfc.mbc;
i40e_get_pfc_delay(hw, &pfc->delay);
- /* Get Requests/Indicatiosn */
+ /* Get Requests/Indications */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
pfc->requests[i] = pf->stats.priority_xoff_tx[i];
pfc->indications[i] = pf->stats.priority_xoff_rx[i];
@@ -94,6 +97,713 @@ static int i40e_dcbnl_ieee_getpfc(struct net_device *dev,
}
/**
+ * i40e_dcbnl_ieee_setets - set IEEE ETS configuration
+ * @netdev: the corresponding netdev
+ * @ets: structure to hold the ETS information
+ *
+ * Set IEEE ETS configuration
+ **/
+static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int i, ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+
+ /* Update the ETS configuration for temp */
+ pf->tmp_cfg.etscfg.willing = ets->willing;
+ pf->tmp_cfg.etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
+ pf->tmp_cfg.etscfg.cbs = ets->cbs;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ pf->tmp_cfg.etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
+ pf->tmp_cfg.etscfg.tsatable[i] = ets->tc_tsa[i];
+ pf->tmp_cfg.etscfg.prioritytable[i] = ets->prio_tc[i];
+ pf->tmp_cfg.etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
+ pf->tmp_cfg.etsrec.tsatable[i] = ets->tc_reco_tsa[i];
+ pf->tmp_cfg.etsrec.prioritytable[i] = ets->reco_prio_tc[i];
+ }
+
+ /* Commit changes to HW */
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB ETS configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_setpfc - set local IEEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @pfc: structure to hold the PFC information
+ *
+ * Sets local IEEE PFC configuration
+ **/
+static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+ if (pfc->pfc_cap)
+ pf->tmp_cfg.pfc.pfccap = pfc->pfc_cap;
+ else
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ pf->tmp_cfg.pfc.pfcenable = pfc->pfc_en;
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB PFC configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_setapp - set local IEEE App configuration
+ * @netdev: the corresponding netdev
+ * @app: structure to hold the Application information
+ *
+ * Sets local IEEE App configuration
+ **/
+static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcb_app_priority_table new_app;
+ struct i40e_dcbx_config *old_cfg;
+ int ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ if (old_cfg->numapps == I40E_DCBX_MAX_APPS)
+ return -EINVAL;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ new_app.selector = app->selector;
+ new_app.protocolid = app->protocol;
+ new_app.priority = app->priority;
+ /* Already internally available */
+ if (i40e_dcbnl_find_app(old_cfg, &new_app))
+ return 0;
+
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+ /* Add the app */
+ pf->tmp_cfg.app[pf->tmp_cfg.numapps++] = new_app;
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_ieee_delapp - delete local IEEE App configuration
+ * @netdev: the corresponding netdev
+ * @app: structure to hold the Application information
+ *
+ * Deletes local IEEE App configuration other than the first application
+ * required by firmware
+ **/
+static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct i40e_dcbx_config *old_cfg;
+ int i, j, ret;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_cfg = &pf->hw.local_dcbx_config;
+ /* Need one app for FW so keep it */
+ if (old_cfg->numapps == 1)
+ return 0;
+
+ /* Copy current config into temp */
+ pf->tmp_cfg = *old_cfg;
+
+ /* Find and reset the app */
+ for (i = 1; i < pf->tmp_cfg.numapps; i++) {
+ if (app->selector == pf->tmp_cfg.app[i].selector &&
+ app->protocol == pf->tmp_cfg.app[i].protocolid &&
+ app->priority == pf->tmp_cfg.app[i].priority) {
+ /* Reset the app data */
+ pf->tmp_cfg.app[i].selector = 0;
+ pf->tmp_cfg.app[i].protocolid = 0;
+ pf->tmp_cfg.app[i].priority = 0;
+ break;
+ }
+ }
+
+ /* If the specific DCB app was not found */
+ if (i == pf->tmp_cfg.numapps)
+ return -EINVAL;
+
+ pf->tmp_cfg.numapps--;
+ /* Overwrite the tmp_cfg app */
+ for (j = i; j < pf->tmp_cfg.numapps; j++)
+ pf->tmp_cfg.app[j] = old_cfg->app[j + 1];
+
+ ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Failed setting DCB configuration err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_getstate - Get DCB enabled state
+ * @netdev: the corresponding netdev
+ *
+ * Get the current DCB enabled state
+ **/
+static u8 i40e_dcbnl_getstate(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "DCB state=%d\n",
+ !!(pf->flags & I40E_FLAG_DCB_ENABLED));
+ return !!(pf->flags & I40E_FLAG_DCB_ENABLED);
+}
+
+/**
+ * i40e_dcbnl_setstate - Set DCB state
+ * @netdev: the corresponding netdev
+ * @state: enable or disable
+ *
+ * Set the DCB state
+ **/
+static u8 i40e_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int ret = I40E_DCBNL_STATUS_SUCCESS;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return ret;
+
+ dev_dbg(&pf->pdev->dev, "new state=%d current state=%d\n",
+ state, (pf->flags & I40E_FLAG_DCB_ENABLED) ? 1 : 0);
+ /* Nothing to do */
+ if (!state == !(pf->flags & I40E_FLAG_DCB_ENABLED))
+ return ret;
+
+ if (i40e_is_sw_dcb(pf)) {
+ if (state) {
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ memcpy(&pf->hw.desired_dcbx_config,
+ &pf->hw.local_dcbx_config,
+ sizeof(struct i40e_dcbx_config));
+ } else {
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ }
+ } else {
+ /* Cannot directly manipulate FW LLDP Agent */
+ ret = I40E_DCBNL_STATUS_ERROR;
+ }
+ return ret;
+}
+
+/**
+ * i40e_dcbnl_set_pg_tc_cfg_tx - Set CEE PG Tx config
+ * @netdev: the corresponding netdev
+ * @tc: the corresponding traffic class
+ * @prio_type: the traffic priority type
+ * @bwg_id: the BW group id the traffic class belongs to
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding tc
+ *
+ * Set Tx PG settings for CEE mode
+ **/
+static void i40e_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
+ u8 prio_type, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int i;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ /* LLTC not supported yet */
+ if (tc >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ /* prio_type, bwg_id and bw_pct per UP are not supported */
+
+ /* Use only up_map to map tc */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (up_map & BIT(i))
+ pf->tmp_cfg.etscfg.prioritytable[i] = tc;
+ }
+ pf->tmp_cfg.etscfg.tsatable[tc] = I40E_IEEE_TSA_ETS;
+ dev_dbg(&pf->pdev->dev,
+ "Set PG config tc=%d bwg_id=%d prio_type=%d bw_pct=%d up_map=%d\n",
+ tc, bwg_id, prio_type, bw_pct, up_map);
+}
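For example, a call with tc = 1 and up_map = 0x0c (bits 2 and 3 set) maps user priorities 2 and 3 to traffic class 1 in the temporary ETS priority table and marks TC 1 as using the ETS TSA.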
+
+/**
+ * i40e_dcbnl_set_pg_bwg_cfg_tx - Set CEE PG Tx BW config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the specified traffic class
+ *
+ * Set Tx BW settings for CEE mode
+ **/
+static void i40e_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ /* LLTC not supported yet */
+ if (pgid >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ pf->tmp_cfg.etscfg.tcbwtable[pgid] = bw_pct;
+ dev_dbg(&pf->pdev->dev, "Set PG BW config tc=%d bw_pct=%d\n",
+ pgid, bw_pct);
+}
+
+/**
+ * i40e_dcbnl_set_pg_tc_cfg_rx - Set CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the BW group id the traffic class belongs to
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding tc
+ *
+ * Set Rx BW settings for CEE mode. The hardware does not support this
+ * so we won't allow setting of this parameter.
+ **/
+static void i40e_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
+ int __always_unused prio,
+ u8 __always_unused prio_type,
+ u8 __always_unused pgid,
+ u8 __always_unused bw_pct,
+ u8 __always_unused up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "Rx TC PG Config Not Supported.\n");
+}
+
+/**
+ * i40e_dcbnl_set_pg_bwg_cfg_rx - Set CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the specified traffic class
+ *
+ * Set Rx BW settings for CEE mode. The hardware does not support this
+ * so we won't allow setting of this parameter.
+ **/
+static void i40e_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "Rx BWG PG Config Not Supported.\n");
+}
+
+/**
+ * i40e_dcbnl_get_pg_tc_cfg_tx - Get CEE PG Tx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: traffic priority type
+ * @pgid: the BW group ID the traffic class belongs to
+ * @bw_pct: BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * Get Tx PG settings for CEE mode
+ **/
+static void i40e_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
+ u8 __always_unused *prio_type,
+ u8 *pgid,
+ u8 __always_unused *bw_pct,
+ u8 __always_unused *up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio];
+ dev_dbg(&pf->pdev->dev, "Get PG config prio=%d tc=%d\n",
+ prio, *pgid);
+}
+
+/**
+ * i40e_dcbnl_get_pg_bwg_cfg_tx - Get CEE PG BW config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ *
+ * Get Tx BW settings for given TC in CEE mode
+ **/
+static void i40e_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (pgid >= I40E_MAX_TRAFFIC_CLASS)
+ return;
+
+ *bw_pct = pf->hw.local_dcbx_config.etscfg.tcbwtable[pgid];
+ dev_dbg(&pf->pdev->dev, "Get PG BW config tc=%d bw_pct=%d\n",
+ pgid, *bw_pct);
+}
+
+/**
+ * i40e_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @prio_type: the traffic priority type
+ * @pgid: the PG ID
+ * @bw_pct: the BW percentage for the corresponding BWG
+ * @up_map: prio mapped to corresponding TC
+ *
+ * Get Rx PG settings for CEE mode. The UP2TC map is applied in same
+ * manner for Tx and Rx (symmetrical) so return the TC information for
+ * given priority accordingly.
+ **/
+static void i40e_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *pgid = pf->hw.local_dcbx_config.etscfg.prioritytable[prio];
+}
+
+/**
+ * i40e_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
+ * @netdev: the corresponding netdev
+ * @pgid: the corresponding traffic class
+ * @bw_pct: the BW percentage for the corresponding TC
+ *
+ * Get Rx BW settings for given TC in CEE mode
+ * The adapter doesn't support Rx ETS and runs in strict priority
+ * mode in the Rx path, hence just returns 0.
+ **/
+static void i40e_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+ *bw_pct = 0;
+}
+
+/**
+ * i40e_dcbnl_set_pfc_cfg - Set CEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @setting: the PFC setting for given priority
+ *
+ * Set the PFC enabled/disabled setting for given user priority
+ **/
+static void i40e_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ if (setting)
+ pf->tmp_cfg.pfc.pfcenable |= BIT(prio);
+ else
+ pf->tmp_cfg.pfc.pfcenable &= ~BIT(prio);
+ dev_dbg(&pf->pdev->dev,
+ "Set PFC Config up=%d setting=%d pfcenable=0x%x\n",
+ prio, setting, pf->tmp_cfg.pfc.pfcenable);
+}
+
+/**
+ * i40e_dcbnl_get_pfc_cfg - Get CEE PFC configuration
+ * @netdev: the corresponding netdev
+ * @prio: the corresponding user priority
+ * @setting: the PFC setting for given priority
+ *
+ * Get the PFC enabled/disabled setting for given user priority
+ **/
+static void i40e_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return;
+
+ if (prio >= I40E_MAX_USER_PRIORITY)
+ return;
+
+ *setting = (pf->hw.local_dcbx_config.pfc.pfcenable >> prio) & 0x1;
+ dev_dbg(&pf->pdev->dev,
+ "Get PFC Config up=%d setting=%d pfcenable=0x%x\n",
+ prio, *setting, pf->hw.local_dcbx_config.pfc.pfcenable);
+}
+
+/**
+ * i40e_dcbnl_cee_set_all - Commit CEE DCB settings to hardware
+ * @netdev: the corresponding netdev
+ *
+ * Commit the current DCB configuration to hardware
+ **/
+static u8 i40e_dcbnl_cee_set_all(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ int err;
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return I40E_DCBNL_STATUS_ERROR;
+
+ dev_dbg(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n");
+ err = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
+
+ return err ? I40E_DCBNL_STATUS_ERROR : I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
+ * i40e_dcbnl_get_cap - Get DCBX capabilities of adapter
+ * @netdev: the corresponding netdev
+ * @capid: the capability type
+ * @cap: the capability value
+ *
+ * Return the capability value for a given capability type
+ **/
+static u8 i40e_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return I40E_DCBNL_STATUS_ERROR;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = pf->dcbx_cap;
+ break;
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ case DCB_CAP_ATTR_BCN:
+ default:
+ *cap = false;
+ break;
+ }
+
+ dev_dbg(&pf->pdev->dev, "Get Capability cap=%d capval=0x%x\n",
+ capid, *cap);
+ return I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
+ * i40e_dcbnl_getnumtcs - Get max number of traffic classes supported
+ * @netdev: the corresponding netdev
+ * @tcid: the TC id
+ * @num: total number of TCs supported by the device
+ *
+ * Return the total number of TCs supported by the adapter
+ **/
+static int i40e_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return -EINVAL;
+
+ *num = I40E_MAX_TRAFFIC_CLASS;
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_setnumtcs - Set CEE number of traffic classes
+ * @netdev: the corresponding netdev
+ * @tcid: the TC id
+ * @num: total number of TCs
+ *
+ * Set the total number of TCs (Unsupported)
+ **/
+static int i40e_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ return -EINVAL;
+}
+
+/**
+ * i40e_dcbnl_getpfcstate - Get CEE PFC mode
+ * @netdev: the corresponding netdev
+ *
+ * Get the current PFC enabled state
+ **/
+static u8 i40e_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ /* Return enabled if any PFC enabled UP */
+ if (pf->hw.local_dcbx_config.pfc.pfcenable)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * i40e_dcbnl_setpfcstate - Set CEE PFC mode
+ * @netdev: the corresponding netdev
+ * @state: required state
+ *
+ * The PFC state is enabled/disabled based on the per-priority PFC
+ * settings and not via this call for the i40e driver
+ **/
+static void i40e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ dev_dbg(&pf->pdev->dev, "PFC State is modified via PFC config.\n");
+}
+
+/**
+ * i40e_dcbnl_getapp - Get CEE APP
+ * @netdev: the corresponding netdev
+ * @idtype: the App selector
+ * @id: the App ethtype or port number
+ *
+ * Return the CEE mode app for the given idtype and id
+ **/
+static int i40e_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE) ||
+ (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+/**
+ * i40e_dcbnl_setdcbx - set required DCBx capability
+ * @netdev: the corresponding netdev
+ * @mode: new DCB mode managed or CEE+IEEE
+ *
+ * Set DCBx capability features
+ **/
+static u8 i40e_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
+{
+ struct i40e_pf *pf = i40e_netdev_to_pf(netdev);
+
+ /* Do not allow to set mode if managed by Firmware */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return I40E_DCBNL_STATUS_ERROR;
+
+ /* No support for LLD_MANAGED modes or CEE+IEEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return I40E_DCBNL_STATUS_ERROR;
+
+ /* Already set to the given mode no change */
+ if (mode == pf->dcbx_cap)
+ return I40E_DCBNL_STATUS_SUCCESS;
+
+ pf->dcbx_cap = mode;
+ if (mode & DCB_CAP_DCBX_VER_CEE)
+ pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_CEE;
+ else
+ pf->hw.local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
+
+ dev_dbg(&pf->pdev->dev, "mode=%d\n", mode);
+ return I40E_DCBNL_STATUS_SUCCESS;
+}
+
+/**
* i40e_dcbnl_getdcbx - retrieve current DCBx capability
* @dev: the corresponding netdev
*
@@ -132,7 +842,31 @@ static const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getets = i40e_dcbnl_ieee_getets,
.ieee_getpfc = i40e_dcbnl_ieee_getpfc,
.getdcbx = i40e_dcbnl_getdcbx,
- .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+ .getpermhwaddr = i40e_dcbnl_get_perm_hw_addr,
+ .ieee_setets = i40e_dcbnl_ieee_setets,
+ .ieee_setpfc = i40e_dcbnl_ieee_setpfc,
+ .ieee_setapp = i40e_dcbnl_ieee_setapp,
+ .ieee_delapp = i40e_dcbnl_ieee_delapp,
+ .getstate = i40e_dcbnl_getstate,
+ .setstate = i40e_dcbnl_setstate,
+ .setpgtccfgtx = i40e_dcbnl_set_pg_tc_cfg_tx,
+ .setpgbwgcfgtx = i40e_dcbnl_set_pg_bwg_cfg_tx,
+ .setpgtccfgrx = i40e_dcbnl_set_pg_tc_cfg_rx,
+ .setpgbwgcfgrx = i40e_dcbnl_set_pg_bwg_cfg_rx,
+ .getpgtccfgtx = i40e_dcbnl_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = i40e_dcbnl_get_pg_bwg_cfg_tx,
+ .getpgtccfgrx = i40e_dcbnl_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = i40e_dcbnl_get_pg_bwg_cfg_rx,
+ .setpfccfg = i40e_dcbnl_set_pfc_cfg,
+ .getpfccfg = i40e_dcbnl_get_pfc_cfg,
+ .setall = i40e_dcbnl_cee_set_all,
+ .getcap = i40e_dcbnl_get_cap,
+ .getnumtcs = i40e_dcbnl_getnumtcs,
+ .setnumtcs = i40e_dcbnl_setnumtcs,
+ .getpfcstate = i40e_dcbnl_getpfcstate,
+ .setpfcstate = i40e_dcbnl_setpfcstate,
+ .getapp = i40e_dcbnl_getapp,
+ .setdcbx = i40e_dcbnl_setdcbx,
};
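For orientation, a minimal sketch (not taken from this patch) of how such a dcbnl ops table is typically wired up: the driver points the netdev at the table, and the dcbnl core dispatches the DCB netlink requests to these callbacks. The wrapper name below is hypothetical; i40e does the equivalent in its own dcbnl setup path.

#include <linux/netdevice.h>
#include <net/dcbnl.h>

/* Illustrative only: attach the ops table above to a netdev (requires
 * CONFIG_DCB); the dcbnl core then routes getcap/getnumtcs/setdcbx/...
 * requests from user space to these callbacks.
 */
static void example_dcbnl_attach(struct net_device *dev)
{
	dev->dcbnl_ops = &dcbnl_ops;
}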
/**
@@ -152,12 +886,16 @@ void i40e_dcbnl_set_all(struct i40e_vsi *vsi)
u8 prio, tc_map;
int i;
+ /* SW DCB is handled by the DCBNL set calls */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
+ return;
+
/* DCB not enabled */
if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return;
/* MFP mode but not an iSCSI PF so return */
- if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi))
+ if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(hw->func_caps.iscsi))
return;
dcbxcfg = &hw->local_dcbx_config;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 26ba1f3eb2d8..a8a2b5f683a2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -3222,13 +3222,30 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
fsp->m_u.usr_ip4_spec.proto = 0;
}
- /* Reverse the src and dest notion, since the HW views them from
- * Tx perspective where as the user expects it from Rx filter view.
- */
- fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
- fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+ if (fsp->flow_type == IPV6_USER_FLOW ||
+ fsp->flow_type == UDP_V6_FLOW ||
+ fsp->flow_type == TCP_V6_FLOW ||
+ fsp->flow_type == SCTP_V6_FLOW) {
+ /* Reverse the src and dest notion, since the HW views them
+ * from Tx perspective whereas the user expects it from
+ * Rx filter view.
+ */
+ fsp->h_u.tcp_ip6_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip6_spec.pdst = rule->src_port;
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->src_ip6,
+ sizeof(__be32) * 4);
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->dst_ip6,
+ sizeof(__be32) * 4);
+ } else {
+ /* Reverse the src and dest notion, since the HW views them
+ * from Tx perspective whereas the user expects it from
+ * Rx filter view.
+ */
+ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip;
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip;
+ }
switch (rule->flow_type) {
case SCTP_V4_FLOW:
@@ -3240,9 +3257,21 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
case UDP_V4_FLOW:
index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
break;
+ case SCTP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ break;
+ case TCP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ break;
+ case UDP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ break;
case IP_USER_FLOW:
index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
break;
+ case IPV6_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ break;
default:
/* If we have stored a filter with a flow type not listed here
* it is almost certainly a driver bug. WARN(), and then
@@ -3258,6 +3287,20 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
input_set = i40e_read_fd_input_set(pf, index);
no_input_set:
+ if (input_set & I40E_L3_V6_SRC_MASK) {
+ fsp->m_u.tcp_ip6_spec.ip6src[0] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[1] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[2] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6src[3] = htonl(0xFFFFFFFF);
+ }
+
+ if (input_set & I40E_L3_V6_DST_MASK) {
+ fsp->m_u.tcp_ip6_spec.ip6dst[0] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[1] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[2] = htonl(0xFFFFFFFF);
+ fsp->m_u.tcp_ip6_spec.ip6dst[3] = htonl(0xFFFFFFFF);
+ }
+
if (input_set & I40E_L3_SRC_MASK)
fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
@@ -3275,6 +3318,14 @@ no_input_set:
else
fsp->ring_cookie = rule->q_index;
+ if (rule->vlan_tag) {
+ fsp->h_ext.vlan_etype = rule->vlan_etype;
+ fsp->m_ext.vlan_etype = htons(0xFFFF);
+ fsp->h_ext.vlan_tci = rule->vlan_tag;
+ fsp->m_ext.vlan_tci = htons(0xFFFF);
+ fsp->flow_type |= FLOW_EXT;
+ }
+
if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
struct i40e_vsi *vsi;
@@ -3921,6 +3972,14 @@ static const char *i40e_flow_str(struct ethtool_rx_flow_spec *fsp)
return "sctp4";
case IP_USER_FLOW:
return "ip4";
+ case TCP_V6_FLOW:
+ return "tcp6";
+ case UDP_V6_FLOW:
+ return "udp6";
+ case SCTP_V6_FLOW:
+ return "sctp6";
+ case IPV6_USER_FLOW:
+ return "ip6";
default:
return "unknown";
}
@@ -4056,9 +4115,14 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
struct ethtool_rx_flow_spec *fsp,
struct i40e_rx_flow_userdef *userdef)
{
- struct i40e_pf *pf = vsi->back;
+ static const __be32 ipv6_full_mask[4] = {cpu_to_be32(0xffffffff),
+ cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff),
+ cpu_to_be32(0xffffffff)};
+ struct ethtool_tcpip6_spec *tcp_ip6_spec;
+ struct ethtool_usrip6_spec *usr_ip6_spec;
struct ethtool_tcpip4_spec *tcp_ip4_spec;
struct ethtool_usrip4_spec *usr_ip4_spec;
+ struct i40e_pf *pf = vsi->back;
u64 current_mask, new_mask;
bool new_flex_offset = false;
bool flex_l3 = false;
@@ -4080,11 +4144,28 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
index = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
fdir_filter_count = &pf->fd_udp4_filter_cnt;
break;
+ case SCTP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
+ fdir_filter_count = &pf->fd_sctp6_filter_cnt;
+ break;
+ case TCP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
+ fdir_filter_count = &pf->fd_tcp6_filter_cnt;
+ break;
+ case UDP_V6_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
+ fdir_filter_count = &pf->fd_udp6_filter_cnt;
+ break;
case IP_USER_FLOW:
index = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
fdir_filter_count = &pf->fd_ip4_filter_cnt;
flex_l3 = true;
break;
+ case IPV6_USER_FLOW:
+ index = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ fdir_filter_count = &pf->fd_ip6_filter_cnt;
+ flex_l3 = true;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -4147,6 +4228,53 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EOPNOTSUPP;
break;
+ case SCTP_V6_FLOW:
+ new_mask &= ~I40E_VERIFY_TAG_MASK;
+ fallthrough;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ tcp_ip6_spec = &fsp->m_u.tcp_ip6_spec;
+
+ /* Check if user provided IPv6 source address. */
+ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6src,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_SRC_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &tcp_ip6_spec->ip6src))
+ new_mask &= ~I40E_L3_V6_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Check if user provided destination address. */
+ if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6dst,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_DST_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &tcp_ip6_spec->ip6dst))
+ new_mask &= ~I40E_L3_V6_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 source port */
+ if (tcp_ip6_spec->psrc == htons(0xFFFF))
+ new_mask |= I40E_L4_SRC_MASK;
+ else if (!tcp_ip6_spec->psrc)
+ new_mask &= ~I40E_L4_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* L4 destination port */
+ if (tcp_ip6_spec->pdst == htons(0xFFFF))
+ new_mask |= I40E_L4_DST_MASK;
+ else if (!tcp_ip6_spec->pdst)
+ new_mask &= ~I40E_L4_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Traffic Classes is not supported. */
+ if (tcp_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+ break;
case IP_USER_FLOW:
usr_ip4_spec = &fsp->m_u.usr_ip4_spec;
@@ -4187,10 +4315,62 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
return -EINVAL;
break;
+ case IPV6_USER_FLOW:
+ usr_ip6_spec = &fsp->m_u.usr_ip6_spec;
+
+ /* Check if user provided IPv6 source address. */
+ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6src,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_SRC_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &usr_ip6_spec->ip6src))
+ new_mask &= ~I40E_L3_V6_SRC_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ /* Check if user provided destination address. */
+ if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6dst,
+ (struct in6_addr *)&ipv6_full_mask))
+ new_mask |= I40E_L3_V6_DST_MASK;
+ else if (ipv6_addr_any((struct in6_addr *)
+ &usr_ip6_spec->ip6dst))
+ new_mask &= ~I40E_L3_V6_DST_MASK;
+ else
+ return -EOPNOTSUPP;
+
+ if (usr_ip6_spec->l4_4_bytes == htonl(0xFFFFFFFF))
+ new_mask |= I40E_L4_SRC_MASK | I40E_L4_DST_MASK;
+ else if (!usr_ip6_spec->l4_4_bytes)
+ new_mask &= ~(I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ else
+ return -EOPNOTSUPP;
+
+ /* Filtering on Traffic class is not supported. */
+ if (usr_ip6_spec->tclass)
+ return -EOPNOTSUPP;
+
+ /* Filtering on L4 protocol is not supported */
+ if (usr_ip6_spec->l4_proto)
+ return -EINVAL;
+
+ break;
default:
return -EOPNOTSUPP;
}
+ if (fsp->flow_type & FLOW_EXT) {
+ /* Allow only 802.1Q and no etype defined, as
+ * later it's modified to 0x8100
+ */
+ if (fsp->h_ext.vlan_etype != htons(ETH_P_8021Q) &&
+ fsp->h_ext.vlan_etype != 0)
+ return -EOPNOTSUPP;
+ if (fsp->m_ext.vlan_tci == htons(0xFFFF))
+ new_mask |= I40E_VLAN_SRC_MASK;
+ else
+ new_mask &= ~I40E_VLAN_SRC_MASK;
+ }
+
/* First, clear all flexible filter entries */
new_mask &= ~I40E_FLEX_INPUT_MASK;
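The V6 cases in this hunk accept only an all-ones or an all-zero address mask and reject anything in between, since the hardware input set cannot express partial IPv6 masks. A standalone sketch of that classification, assuming the same <net/ipv6.h> helpers; the helper name is hypothetical and not part of the patch.

#include <linux/in6.h>
#include <net/ipv6.h>

/* Returns 1 for an all-ones mask (match on the field), 0 for an all-zero
 * mask (ignore the field), and -EOPNOTSUPP for any partial mask.
 */
static int i40e_classify_v6_mask(const struct in6_addr *mask)
{
	static const struct in6_addr full_mask = { .s6_addr32 = {
		cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff),
		cpu_to_be32(0xffffffff), cpu_to_be32(0xffffffff) } };

	if (ipv6_addr_equal(mask, &full_mask))
		return 1;
	if (ipv6_addr_any(mask))
		return 0;
	return -EOPNOTSUPP;
}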
@@ -4370,7 +4550,9 @@ static bool i40e_match_fdir_filter(struct i40e_fdir_filter *a,
a->dst_port != b->dst_port ||
a->src_port != b->src_port ||
a->flow_type != b->flow_type ||
- a->ip4_proto != b->ip4_proto)
+ a->ipl4_proto != b->ipl4_proto ||
+ a->vlan_tag != b->vlan_tag ||
+ a->vlan_etype != b->vlan_etype)
return false;
return true;
@@ -4528,15 +4710,38 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
input->flow_type = fsp->flow_type & ~FLOW_EXT;
- input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
- /* Reverse the src and dest notion, since the HW expects them to be from
- * Tx perspective where as the input from user is from Rx filter view.
- */
- input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
- input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
- input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ input->vlan_etype = fsp->h_ext.vlan_etype;
+ if (!fsp->m_ext.vlan_etype && fsp->h_ext.vlan_tci)
+ input->vlan_etype = cpu_to_be16(ETH_P_8021Q);
+ if (fsp->m_ext.vlan_tci && input->vlan_etype)
+ input->vlan_tag = fsp->h_ext.vlan_tci;
+ if (input->flow_type == IPV6_USER_FLOW ||
+ input->flow_type == UDP_V6_FLOW ||
+ input->flow_type == TCP_V6_FLOW ||
+ input->flow_type == SCTP_V6_FLOW) {
+ /* Reverse the src and dest notion, since the HW expects them
+ * to be from Tx perspective whereas the input from user is
+ * from Rx filter view.
+ */
+ input->ipl4_proto = fsp->h_u.usr_ip6_spec.l4_proto;
+ input->dst_port = fsp->h_u.tcp_ip6_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip6_spec.pdst;
+ memcpy(input->dst_ip6, fsp->h_u.ah_ip6_spec.ip6src,
+ sizeof(__be32) * 4);
+ memcpy(input->src_ip6, fsp->h_u.ah_ip6_spec.ip6dst,
+ sizeof(__be32) * 4);
+ } else {
+ /* Reverse the src and dest notion, since the HW expects them
+ * to be from Tx perspective whereas the input from user is
+ * from Rx filter view.
+ */
+ input->ipl4_proto = fsp->h_u.usr_ip4_spec.proto;
+ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst;
+ }
if (userdef.flex_filter) {
input->flex_filter = true;
@@ -5033,23 +5238,13 @@ flags_complete:
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- struct i40e_dcbx_config *dcbcfg;
-
+#ifdef CONFIG_I40E_DCB
+ i40e_dcb_sw_default_config(pf);
+#endif /* CONFIG_I40E_DCB */
+ i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL);
i40e_aq_stop_lldp(&pf->hw, true, false, NULL);
- i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
- /* reset local_dcbx_config to default */
- dcbcfg = &pf->hw.local_dcbx_config;
- dcbcfg->etscfg.willing = 1;
- dcbcfg->etscfg.maxtcs = 0;
- dcbcfg->etscfg.tcbwtable[0] = 100;
- for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++)
- dcbcfg->etscfg.tcbwtable[i] = 0;
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
- dcbcfg->etscfg.prioritytable[i] = 0;
- dcbcfg->etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
- dcbcfg->pfc.willing = 1;
- dcbcfg->pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
} else {
+ i40e_set_lldp_forwarding(pf, false);
status = i40e_aq_start_lldp(&pf->hw, false, NULL);
if (status) {
adq_err = pf->hw.aq.asq_last_status;
@@ -5252,12 +5447,131 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp phy_cfg;
+ enum i40e_status_code status = 0;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ /* Get initial PHY capabilities */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Check whether NIC configuration is compatible with Energy Efficient
+ * Ethernet (EEE) mode.
+ */
+ if (phy_cfg.eee_capability == 0)
+ return -EOPNOTSUPP;
+
+ edata->supported = SUPPORTED_Autoneg;
+ edata->lp_advertised = edata->supported;
+
+ /* Get current configuration */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_cfg, NULL);
+ if (status)
+ return -EAGAIN;
+
+ edata->advertised = phy_cfg.eee_capability ? SUPPORTED_Autoneg : 0U;
+ edata->eee_enabled = !!edata->advertised;
+ edata->tx_lpi_enabled = pf->stats.tx_lpi_status;
+
+ edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status;
+
+ return 0;
+}
+
+static int i40e_is_eee_param_supported(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_ethtool_not_used {
+ u32 value;
+ const char *name;
+ } param[] = {
+ {edata->advertised & ~SUPPORTED_Autoneg, "advertise"},
+ {edata->tx_lpi_timer, "tx-timer"},
+ {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"}
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(param); i++) {
+ if (param[i].value) {
+ netdev_info(netdev,
+ "EEE setting %s not supported\n",
+ param[i].name);
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return 0;
}
static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code status = I40E_SUCCESS;
+ struct i40e_aq_set_phy_config config;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ __le16 eee_capability;
+
+ /* Deny parameters we don't support */
+ if (i40e_is_eee_param_supported(netdev, edata))
+ return -EOPNOTSUPP;
+
+ /* Get initial PHY capabilities */
+ status = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
+ NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Check whether NIC configuration is compatible with Energy Efficient
+ * Ethernet (EEE) mode.
+ */
+ if (abilities.eee_capability == 0)
+ return -EOPNOTSUPP;
+
+ /* Cache initial EEE capability */
+ eee_capability = abilities.eee_capability;
+
+ /* Get current PHY configuration */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (status)
+ return -EAGAIN;
+
+ /* Cache current PHY configuration */
+ config.phy_type = abilities.phy_type;
+ config.phy_type_ext = abilities.phy_type_ext;
+ config.link_speed = abilities.link_speed;
+ config.abilities = abilities.abilities |
+ I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_PHY_FEC_CONFIG_MASK;
+
+ /* Set desired EEE state */
+ if (edata->eee_enabled) {
+ config.eee_capability = eee_capability;
+ config.eeer |= cpu_to_le32(I40E_PRTPM_EEER_TX_LPI_EN_MASK);
+ } else {
+ config.eee_capability = 0;
+ config.eeer &= cpu_to_le32(~I40E_PRTPM_EEER_TX_LPI_EN_MASK);
+ }
+
+ /* Apply modified PHY configuration */
+ status = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (status)
+ return -EAGAIN;
+
+ return 0;
}
static const struct ethtool_ops i40e_ethtool_recovery_mode_ops = {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1db482d310c2..8bb8eb65add9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#include <linux/etherdevice.h>
#include <linux/of_net.h>
@@ -35,7 +35,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
-static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_prep_for_reset(struct i40e_pf *pf);
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
@@ -3496,6 +3496,24 @@ static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
}
/**
+ * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
+ * @pf: Pointer to the targeted PF
+ *
+ * Set all flow director counters to 0.
+ */
+static void i40e_reset_fdir_filter_cnt(struct i40e_pf *pf)
+{
+ pf->fd_tcp4_filter_cnt = 0;
+ pf->fd_udp4_filter_cnt = 0;
+ pf->fd_sctp4_filter_cnt = 0;
+ pf->fd_ip4_filter_cnt = 0;
+ pf->fd_tcp6_filter_cnt = 0;
+ pf->fd_udp6_filter_cnt = 0;
+ pf->fd_sctp6_filter_cnt = 0;
+ pf->fd_ip6_filter_cnt = 0;
+}
+
+/**
* i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
* @vsi: Pointer to the targeted VSI
*
@@ -3512,10 +3530,7 @@ static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
return;
/* Reset FDir counters as we're replaying all existing filters */
- pf->fd_tcp4_filter_cnt = 0;
- pf->fd_udp4_filter_cnt = 0;
- pf->fd_sctp4_filter_cnt = 0;
- pf->fd_ip4_filter_cnt = 0;
+ i40e_reset_fdir_filter_cnt(pf);
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
@@ -5291,6 +5306,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
vsi->seid);
return ret;
}
+ memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
@@ -5943,6 +5959,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
i40e_status ret;
int i;
+ memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = ch->enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
bw_data.tc_bw_credits[i] = bw_share[i];
@@ -6398,6 +6415,9 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
/* Enable the TCs available on PF to all VEBs */
tc_map = i40e_pf_get_tc_map(pf);
+ if (tc_map == I40E_DEFAULT_TRAFFIC_CLASS)
+ return;
+
for (v = 0; v < I40E_MAX_VEB; v++) {
if (!pf->veb[v])
continue;
@@ -6465,6 +6485,316 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
}
/**
+ * i40e_suspend_port_tx - Suspend port Tx
+ * @pf: PF struct
+ *
+ * Suspend a port's Tx and issue a PF reset in case of failure.
+ **/
+static int i40e_suspend_port_tx(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int ret;
+
+ ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Suspend Port Tx failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ }
+
+ return ret;
+}
+
+/**
+ * i40e_hw_set_dcb_config - Program new DCBX settings into HW
+ * @pf: PF being configured
+ * @new_cfg: New DCBX configuration
+ *
+ * Program DCB settings into HW and reconfigure VEB/VSIs on
+ * given PF. Uses "Set LLDP MIB" AQC to program the hardware.
+ **/
+static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
+ struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config;
+ int ret;
+
+ /* Check if need reconfiguration */
+ if (!memcmp(new_cfg, old_cfg, sizeof(*new_cfg))) {
+ dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n");
+ return 0;
+ }
+
+ /* Config change, disable all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+
+ /* Copy the new config to the current config */
+ *old_cfg = *new_cfg;
+ old_cfg->etsrec = old_cfg->etscfg;
+ ret = i40e_set_dcb_config(&pf->hw);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Set DCB Config failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+out:
+ /* In case of reset do not try to resume anything */
+ if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) {
+ /* Re-start the VSIs if disabled */
+ ret = i40e_resume_port_tx(pf);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto err;
+ i40e_pf_unquiesce_all_vsi(pf);
+ }
+err:
+ return ret;
+}
+
+/**
+ * i40e_hw_dcb_config - Program new DCBX settings into HW
+ * @pf: PF being configured
+ * @new_cfg: New DCBX configuration
+ *
+ * Program DCB settings into HW and reconfigure VEB/VSIs on
+ * given PF
+ **/
+int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
+{
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ u8 prio_type[I40E_MAX_TRAFFIC_CLASS] = {0};
+ u32 mfs_tc[I40E_MAX_TRAFFIC_CLASS];
+ struct i40e_dcbx_config *old_cfg;
+ u8 mode[I40E_MAX_TRAFFIC_CLASS];
+ struct i40e_rx_pb_config pb_cfg;
+ struct i40e_hw *hw = &pf->hw;
+ u8 num_ports = hw->num_ports;
+ bool need_reconfig;
+ int ret = -EINVAL;
+ u8 lltc_map = 0;
+ u8 tc_map = 0;
+ u8 new_numtc;
+ u8 i;
+
+ dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n");
+ /* Unpack information to program ETS HW via shared API:
+ * numtc, tcmap
+ * LLTC map
+ * ETS/NON-ETS arbiter mode
+ * max exponent (credit refills)
+ * Total number of ports
+ * PFC priority bit-map
+ * Priority Table
+ * BW % per TC
+ * Arbiter mode between UPs sharing same TC
+ * TSA table (ETS or non-ETS)
+ * EEE enabled or not
+ * MFS TC table
+ */
+
+ new_numtc = i40e_dcb_get_num_tc(new_cfg);
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ for (i = 0; i < new_numtc; i++) {
+ tc_map |= BIT(i);
+ switch (new_cfg->etscfg.tsatable[i]) {
+ case I40E_IEEE_TSA_ETS:
+ prio_type[i] = I40E_DCB_PRIO_TYPE_ETS;
+ ets_data.tc_bw_share_credits[i] =
+ new_cfg->etscfg.tcbwtable[i];
+ break;
+ case I40E_IEEE_TSA_STRICT:
+ prio_type[i] = I40E_DCB_PRIO_TYPE_STRICT;
+ lltc_map |= BIT(i);
+ ets_data.tc_bw_share_credits[i] =
+ I40E_DCB_STRICT_PRIO_CREDITS;
+ break;
+ default:
+ /* Invalid TSA type */
+ need_reconfig = false;
+ goto out;
+ }
+ }
+
+ old_cfg = &hw->local_dcbx_config;
+ /* Check if need reconfiguration */
+ need_reconfig = i40e_dcb_need_reconfig(pf, old_cfg, new_cfg);
+
+ /* If needed, enable/disable frame tagging, disable all VSIs
+ * and suspend port tx
+ */
+ if (need_reconfig) {
+ /* Enable DCB tagging only when more than one TC */
+ if (new_numtc > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
+ set_bit(__I40E_PORT_SUSPENDED, pf->state);
+ /* Reconfiguration needed, quiesce all VSIs */
+ i40e_pf_quiesce_all_vsi(pf);
+ ret = i40e_suspend_port_tx(pf);
+ if (ret)
+ goto err;
+ }
+
+ /* Configure Port ETS Tx Scheduler */
+ ets_data.tc_valid_bits = tc_map;
+ ets_data.tc_strict_priority_flags = lltc_map;
+ ret = i40e_aq_config_switch_comp_ets
+ (hw, pf->mac_seid, &ets_data,
+ i40e_aqc_opc_modify_switching_comp_ets, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Modify Port ETS failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Configure Rx ETS HW */
+ memset(&mode, I40E_DCB_ARB_MODE_ROUND_ROBIN, sizeof(mode));
+ i40e_dcb_hw_set_num_tc(hw, new_numtc);
+ i40e_dcb_hw_rx_fifo_config(hw, I40E_DCB_ARB_MODE_ROUND_ROBIN,
+ I40E_DCB_ARB_MODE_STRICT_PRIORITY,
+ I40E_DCB_DEFAULT_MAX_EXPONENT,
+ lltc_map);
+ i40e_dcb_hw_rx_cmd_monitor_config(hw, new_numtc, num_ports);
+ i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode,
+ prio_type);
+ i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable,
+ new_cfg->etscfg.prioritytable);
+ i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable);
+
+ /* Configure Rx Packet Buffers in HW */
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
+ mfs_tc[i] += I40E_PACKET_HDR_PAD;
+ }
+
+ i40e_dcb_hw_calculate_pool_sizes(hw, num_ports,
+ false, new_cfg->pfc.pfcenable,
+ mfs_tc, &pb_cfg);
+ i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg);
+
+ /* Update the local Rx Packet buffer config */
+ pf->pb_cfg = pb_cfg;
+
+ /* Inform the FW about changes to DCB configuration */
+ ret = i40e_aq_dcb_updated(&pf->hw, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "DCB Updated failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto out;
+ }
+
+ /* Update the port DCBx configuration */
+ *old_cfg = *new_cfg;
+
+ /* Changes in configuration update VEB/VSI */
+ i40e_dcb_reconfigure(pf);
+out:
+ /* Re-start the VSIs if disabled */
+ if (need_reconfig) {
+ ret = i40e_resume_port_tx(pf);
+
+ clear_bit(__I40E_PORT_SUSPENDED, pf->state);
+ /* In case of error no point in resuming VSIs */
+ if (ret)
+ goto err;
+
+ /* Wait for the PF's queues to be disabled */
+ ret = i40e_pf_wait_queues_disabled(pf);
+ if (ret) {
+ /* Schedule PF reset to recover */
+ set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+ goto err;
+ } else {
+ i40e_pf_unquiesce_all_vsi(pf);
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
+ }
+ /* Registers are set, let's apply */
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB)
+ ret = i40e_hw_set_dcb_config(pf, new_cfg);
+ }
+
+err:
+ return ret;
+}
+
+/**
+ * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
+ * @pf: PF being queried
+ *
+ * Set default DCB configuration in case DCB is to be done in SW.
+ **/
+int i40e_dcb_sw_default_config(struct i40e_pf *pf)
+{
+ struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config;
+ struct i40e_aqc_configure_switching_comp_ets_data ets_data;
+ struct i40e_hw *hw = &pf->hw;
+ int err;
+
+ if (pf->hw_features & I40E_HW_USE_SET_LLDP_MIB) {
+ /* Update the local cached instance with TC0 ETS */
+ memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config));
+ pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
+ pf->tmp_cfg.etscfg.maxtcs = 0;
+ pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
+ pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS;
+ pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING;
+ pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS;
+ /* FW needs one App to configure HW */
+ pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS;
+ pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE;
+ pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO;
+ pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE;
+
+ return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg);
+ }
+
+ memset(&ets_data, 0, sizeof(ets_data));
+ ets_data.tc_valid_bits = I40E_DEFAULT_TRAFFIC_CLASS; /* TC0 only */
+ ets_data.tc_strict_priority_flags = 0; /* ETS */
+ ets_data.tc_bw_share_credits[0] = I40E_IEEE_DEFAULT_ETS_TCBW; /* 100% to TC0 */
+
+ /* Enable ETS on the Physical port */
+ err = i40e_aq_config_switch_comp_ets
+ (hw, pf->mac_seid, &ets_data,
+ i40e_aqc_opc_enable_switching_comp_ets, NULL);
+ if (err) {
+ dev_info(&pf->pdev->dev,
+ "Enable Port ETS failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ err = -ENOENT;
+ goto out;
+ }
+
+ /* Update the local cached instance with TC0 ETS */
+ dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING;
+ dcb_cfg->etscfg.cbs = 0;
+ dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS;
+ dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW;
+
+out:
+ return err;
+}
+
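When software DCB takes over, the default produced above is deliberately minimal. It has roughly the following shape, written here as an illustrative initializer under the assumption of the I40E_IEEE_DEFAULT_* constants used above (app table omitted); it is not code from the patch.

/* One ETS TC carrying 100% of the bandwidth, every priority left on TC0,
 * PFC capable for all eight TCs but with nothing enabled.
 */
static const struct i40e_dcbx_config i40e_sw_dcb_default = {
	.etscfg = {
		.willing   = I40E_IEEE_DEFAULT_ETS_WILLING,
		.maxtcs    = 0,
		.tcbwtable = { I40E_IEEE_DEFAULT_ETS_TCBW },
		.tsatable  = { I40E_IEEE_TSA_ETS },
	},
	.pfc = {
		.willing = I40E_IEEE_DEFAULT_PFC_WILLING,
		.pfccap  = I40E_MAX_TRAFFIC_CLASS,
	},
};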
+/**
* i40e_init_pf_dcb - Initialize DCB configuration
* @pf: PF being configured
*
@@ -6474,18 +6804,31 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- int err = 0;
+ int err;
/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
* Also do not enable DCBx if FW LLDP agent is disabled
*/
- if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
- (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)) {
- dev_info(&pf->pdev->dev, "DCB is not supported or FW LLDP is disabled\n");
+ if (pf->hw_features & I40E_HW_NO_DCB_SUPPORT) {
+ dev_info(&pf->pdev->dev, "DCB is not supported.\n");
err = I40E_NOT_SUPPORTED;
goto out;
}
-
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n");
+ err = i40e_dcb_sw_default_config(pf);
+ if (err) {
+ dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n");
+ goto out;
+ }
+ dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n");
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ /* at init capable but disabled */
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ goto out;
+ }
err = i40e_init_dcb(hw, true);
if (!err) {
/* Device/Function is not DCBX capable */
@@ -6525,6 +6868,40 @@ out:
#endif /* CONFIG_I40E_DCB */
/**
+ * i40e_set_lldp_forwarding - set forwarding of lldp frames
+ * @pf: PF being configured
+ * @enable: if forwarding to OS shall be enabled
+ *
+ * Toggle the forwarding of LLDP frames.
+ * When passing DCB control from firmware to software,
+ * LLDP frames must be forwarded to the software-based
+ * LLDP agent.
+ */
+void i40e_set_lldp_forwarding(struct i40e_pf *pf, bool enable)
+{
+ if (pf->lan_vsi == I40E_NO_VSI)
+ return;
+
+ if (!pf->vsi[pf->lan_vsi])
+ return;
+
+ /* No need to check the outcome, commands may fail
+ * if desired value is already set
+ */
+ i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
+ pf->vsi[pf->lan_vsi]->seid, 0,
+ enable, NULL, NULL);
+
+ i40e_aq_add_rem_control_packet_filter(&pf->hw, NULL, ETH_P_LLDP,
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX |
+ I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC,
+ pf->vsi[pf->lan_vsi]->seid, 0,
+ enable, NULL, NULL);
+}
+
+/**
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
* @isup: true of link is up, false otherwise
@@ -8287,7 +8664,6 @@ int i40e_open(struct net_device *netdev)
TCP_FLAG_FIN |
TCP_FLAG_CWR) >> 16);
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
-
udp_tunnel_get_rx_info(netdev);
return 0;
@@ -8402,32 +8778,51 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
INIT_LIST_HEAD(&pf->l4_flex_pit_list);
pf->fdir_pf_active_filters = 0;
- pf->fd_tcp4_filter_cnt = 0;
- pf->fd_udp4_filter_cnt = 0;
- pf->fd_sctp4_filter_cnt = 0;
- pf->fd_ip4_filter_cnt = 0;
+ i40e_reset_fdir_filter_cnt(pf);
/* Reprogram the default input set for TCP/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ /* Reprogram the default input set for TCP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_TCP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
/* Reprogram the default input set for UDP/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ /* Reprogram the default input set for UDP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_UDP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
/* Reprogram the default input set for SCTP/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+ /* Reprogram the default input set for SCTP/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_SCTP,
+ I40E_L3_V6_SRC_MASK | I40E_L3_V6_DST_MASK |
+ I40E_L4_SRC_MASK | I40E_L4_DST_MASK);
+
/* Reprogram the default input set for Other/IPv4 */
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ /* Reprogram the default input set for Other/IPv6 */
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV6_OTHER,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
+
+ i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV6,
+ I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
@@ -8543,7 +8938,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
*
* Resets PF and reinitializes PFs VSI.
*/
- i40e_prep_for_reset(pf, lock_acquired);
+ i40e_prep_for_reset(pf);
i40e_reset_and_rebuild(pf, true, lock_acquired);
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
@@ -8653,6 +9048,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
int ret = 0;
u8 type;
+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ ~(I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB)) &&
+ !(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ /* let firmware decide if the DCB should be disabled */
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+
/* Not DCB capable or capability disabled */
if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return ret;
@@ -8684,10 +9087,20 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
/* Get updated DCBX data from firmware */
ret = i40e_get_dcb_config(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev,
- "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ /* X710-T*L 2.5G and 5G speeds don't support DCB */
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
+ dev_warn(&pf->pdev->dev,
+ "DCB is not supported for X710-T*L 2.5/5G speeds\n");
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ }
goto exit;
}
@@ -8891,8 +9304,17 @@ static void i40e_delete_invalid_filter(struct i40e_pf *pf,
case SCTP_V4_FLOW:
pf->fd_sctp4_filter_cnt--;
break;
+ case TCP_V6_FLOW:
+ pf->fd_tcp6_filter_cnt--;
+ break;
+ case UDP_V6_FLOW:
+ pf->fd_udp6_filter_cnt--;
+ break;
+ case SCTP_V6_FLOW:
+ pf->fd_sctp6_filter_cnt--;
+ break;
case IP_USER_FLOW:
- switch (filter->ip4_proto) {
+ switch (filter->ipl4_proto) {
case IPPROTO_TCP:
pf->fd_tcp4_filter_cnt--;
break;
@@ -8907,6 +9329,22 @@ static void i40e_delete_invalid_filter(struct i40e_pf *pf,
break;
}
break;
+ case IPV6_USER_FLOW:
+ switch (filter->ipl4_proto) {
+ case IPPROTO_TCP:
+ pf->fd_tcp6_filter_cnt--;
+ break;
+ case IPPROTO_UDP:
+ pf->fd_udp6_filter_cnt--;
+ break;
+ case IPPROTO_SCTP:
+ pf->fd_sctp6_filter_cnt--;
+ break;
+ case IPPROTO_IP:
+ pf->fd_ip6_filter_cnt--;
+ break;
+ }
+ break;
}
/* Remove the filter from the list and free memory */
@@ -8940,7 +9378,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
* rules active.
*/
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
- (pf->fd_tcp4_filter_cnt == 0))
+ pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
i40e_reenable_fdir_atr(pf);
/* if hw had a problem adding a filter, delete it */
@@ -9109,6 +9547,9 @@ static void i40e_link_event(struct i40e_pf *pf)
u8 new_link_speed, old_link_speed;
i40e_status status;
bool new_link, old_link;
+#ifdef CONFIG_I40E_DCB
+ int err;
+#endif /* CONFIG_I40E_DCB */
/* set this to force the get_link_status call to refresh state */
pf->hw.phy.get_link_info = true;
@@ -9152,6 +9593,31 @@ static void i40e_link_event(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_PTP)
i40e_ptp_set_increment(pf);
+#ifdef CONFIG_I40E_DCB
+ if (new_link == old_link)
+ return;
+ /* Not SW DCB so firmware will take care of default settings */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ return;
+
+ /* Only handle link down here; after link up, in the SW DCB case,
+ * the SW LLDP agent takes care of setting things up again
+ */
+ if (!new_link) {
+ dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
+ memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
+ err = i40e_dcb_sw_default_config(pf);
+ if (err) {
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
+ } else {
+ pf->dcbx_cap = DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ }
+ }
+#endif /* CONFIG_I40E_DCB */
}
/**
@@ -9228,7 +9694,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
* precedence before starting a new reset sequence.
*/
if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
i40e_reset(pf);
i40e_rebuild(pf, false, false);
}
@@ -9360,7 +9826,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
switch (opcode) {
case i40e_aqc_opc_get_link_status:
+ rtnl_lock();
i40e_handle_link_event(pf, &event);
+ rtnl_unlock();
break;
case i40e_aqc_opc_send_msg_to_pf:
ret = i40e_vc_process_vf_msg(pf,
@@ -9374,7 +9842,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
rtnl_lock();
- ret = i40e_handle_lldp_event(pf, &event);
+ i40e_handle_lldp_event(pf, &event);
rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
break;
@@ -9860,12 +10328,10 @@ static int i40e_rebuild_channels(struct i40e_vsi *vsi)
/**
* i40e_prep_for_reset - prep for the core to reset
* @pf: board private structure
- * @lock_acquired: indicates whether or not the lock has been acquired
- * before this function was called.
*
* Close up the VFs and other things in prep for PF Reset.
**/
-static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
+static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
i40e_status ret = 0;
@@ -9880,12 +10346,7 @@ static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
/* quiesce the VSIs and their queues that are not already DOWN */
- /* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
- if (!lock_acquired)
- rtnl_lock();
i40e_pf_quiesce_all_vsi(pf);
- if (!lock_acquired)
- rtnl_unlock();
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
@@ -10097,24 +10558,41 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
goto end_core_reset;
}
- /* Enable FW to write a default DCB config on link-up */
- i40e_aq_set_dcb_parameters(hw, true, NULL);
-
-#ifdef CONFIG_I40E_DCB
- ret = i40e_init_pf_dcb(pf);
- if (ret) {
- dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
- /* Continue without DCB enabled */
- }
-#endif /* CONFIG_I40E_DCB */
- /* do basic switch setup */
if (!lock_acquired)
rtnl_lock();
ret = i40e_setup_pf_switch(pf, reinit);
if (ret)
goto end_unlock;
+#ifdef CONFIG_I40E_DCB
+ /* Enable FW to write a default DCB config on link-up
+ * unless I40E_FLAG_TC_MQPRIO was enabled or DCB
+ * is not supported with new link speed
+ */
+ if (pf->flags & I40E_FLAG_TC_MQPRIO) {
+ i40e_aq_set_dcb_parameters(hw, false, NULL);
+ } else {
+ if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
+ (hw->phy.link_info.link_speed &
+ (I40E_LINK_SPEED_2_5GB | I40E_LINK_SPEED_5GB))) {
+ i40e_aq_set_dcb_parameters(hw, false, NULL);
+ dev_warn(&pf->pdev->dev,
+ "DCB is not supported for X710-T*L 2.5/5G speeds\n");
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ } else {
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+ ret = i40e_init_pf_dcb(pf);
+ if (ret) {
+ dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
+ ret);
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
+ }
+ }
+ }
+
+#endif /* CONFIG_I40E_DCB */
+
/* The driver only wants link up/down and module qualification
* reports from firmware. Note the negative logic.
*/
@@ -10251,6 +10729,10 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
*/
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
pf->main_vsi_seid);
+#ifdef CONFIG_I40E_DCB
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
+ i40e_set_lldp_forwarding(pf, true);
+#endif /* CONFIG_I40E_DCB */
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
@@ -10317,7 +10799,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
**/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
- i40e_prep_for_reset(pf, lock_acquired);
+ i40e_prep_for_reset(pf);
i40e_reset_and_rebuild(pf, false, lock_acquired);
}
@@ -11651,7 +12133,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
u16 qcount;
vsi->req_queue_pairs = queue_count;
- i40e_prep_for_reset(pf, true);
+ i40e_prep_for_reset(pf);
pf->alloc_rss_size = new_rss_size;
@@ -12448,9 +12930,10 @@ out_err:
* i40e_xdp_setup - add/remove an XDP program
* @vsi: VSI to changed
* @prog: XDP program
+ * @extack: netlink extended ack
**/
-static int i40e_xdp_setup(struct i40e_vsi *vsi,
- struct bpf_prog *prog)
+static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_pf *pf = vsi->back;
@@ -12459,17 +12942,16 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
int i;
/* Don't allow frames that span over multiple buffers */
- if (frame_size > vsi->rx_buf_len)
+ if (frame_size > vsi->rx_buf_len) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
return -EINVAL;
-
- if (!i40e_enabled_xdp_vsi(vsi) && !prog)
- return 0;
+ }
/* When turning XDP on->off/off->on we reset and rebuild the rings. */
need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
if (need_reset)
- i40e_prep_for_reset(pf, true);
+ i40e_prep_for_reset(pf);
old_prog = xchg(&vsi->xdp_prog, prog);
@@ -12769,7 +13251,7 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
- return i40e_xdp_setup(vsi, xdp->prog);
+ return i40e_xdp_setup(vsi, xdp->prog, xdp->extack);
case XDP_SETUP_XSK_POOL:
return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
@@ -12804,8 +13286,6 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -14706,6 +15186,10 @@ err_switch_setup:
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct i40e_aq_get_phy_abilities_resp abilities;
+#ifdef CONFIG_I40E_DCB
+ enum i40e_get_fw_lldp_status_resp lldp_status;
+ i40e_status status;
+#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
@@ -14962,6 +15446,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
+#ifdef CONFIG_I40E_DCB
+ status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status);
+ (!status &&
+ lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) ?
+ (pf->flags &= ~I40E_FLAG_DISABLE_FW_LLDP) :
+ (pf->flags |= I40E_FLAG_DISABLE_FW_LLDP);
dev_info(&pdev->dev,
(pf->flags & I40E_FLAG_DISABLE_FW_LLDP) ?
"FW LLDP is disabled\n" :
@@ -14970,7 +15460,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Enable FW to write default DCB config on link-up */
i40e_aq_set_dcb_parameters(hw, true, NULL);
-#ifdef CONFIG_I40E_DCB
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
@@ -15263,6 +15752,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
pf->main_vsi_seid);
+#ifdef CONFIG_I40E_DCB
+ if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP)
+ i40e_set_lldp_forwarding(pf, true);
+#endif /* CONFIG_I40E_DCB */
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
@@ -15465,7 +15958,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, pf->state))
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -15515,7 +16008,7 @@ static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
}
/**
@@ -15619,7 +16112,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
i40e_enable_mc_magic_wake(pf);
- i40e_prep_for_reset(pf, false);
+ i40e_prep_for_reset(pf);
wr32(hw, I40E_PFPM_APM,
(pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
@@ -15678,7 +16171,7 @@ static int __maybe_unused i40e_suspend(struct device *dev)
*/
rtnl_lock();
- i40e_prep_for_reset(pf, true);
+ i40e_prep_for_reset(pf);
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 5c1378641b3b..aaea297640e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_PROTOTYPE_H_
#define _I40E_PROTOTYPE_H_
@@ -200,6 +200,10 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
u8 mib_type, void *buff, u16 buff_size,
u16 *local_len, u16 *remote_len,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_lldp_mib(struct i40e_hw *hw,
+ u8 mib_type, void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
@@ -289,6 +293,9 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
u8 filter_count);
i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
+enum i40e_status_code
+i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
/* i40e_common */
i40e_status i40e_init_shared_code(struct i40e_hw *hw);
i40e_status i40e_pf_reset(struct i40e_hw *hw);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 564df22f3f46..36f7b27a04ae 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_REGISTER_H_
#define _I40E_REGISTER_H_
@@ -34,12 +34,137 @@
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
+#define I40E_PRT_SWR_PM_THR 0x0026CD00 /* Reset: CORER */
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT 0
+#define I40E_PRT_SWR_PM_THR_THRESHOLD_MASK I40E_MASK(0xFF, I40E_PRT_SWR_PM_THR_THRESHOLD_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
+#define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
+#define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
#define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
#define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
+#define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
+#define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
+#define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
+#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
+#define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
+#define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_RUPTQ(_i) (0x00122400 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_RUPTQ_MAX_INDEX 7
+#define I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT 0
+#define I40E_PRTDCB_RUPTQ_RXQNUM_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_RUPTQ_RXQNUM_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
+#define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
+#define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
+#define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, \
+ I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, \
+ I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
+#define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
+#define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
@@ -359,6 +484,27 @@
#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
#define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, \
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, \
+ I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, \
+ I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, \
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, \
+ I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
@@ -398,8 +544,34 @@
#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
+#define I40E_PRTRPB_SHW_SHW_SHIFT 0
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
+#define I40E_PRTRPB_SLW_SLW_SHIFT 0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
+#define I40E_PRTRPB_SPS_SPS_SHIFT 0
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
#define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 4aca637d4a23..f6f1af94cca0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -42,9 +42,6 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
(fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
- flex_ptype |= I40E_TXD_FLTR_QW0_PCTYPE_MASK &
- (fdata->flex_offset << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT);
-
/* Use LAN VSI Id if not programmed by user */
flex_ptype |= I40E_TXD_FLTR_QW0_DEST_VSI_MASK &
((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) <<
@@ -160,59 +157,180 @@ dma_fail:
return -1;
}
-#define IP_HEADER_OFFSET 14
-#define I40E_UDPIP_DUMMY_PACKET_LEN 42
/**
- * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
- * @vsi: pointer to the targeted VSI
- * @fd_data: the flow director data required for the FDir descriptor
- * @add: true adds a filter, false removes it
+ * i40e_create_dummy_packet - Constructs dummy packet for HW
+ * @dummy_packet: preallocated space for dummy packet
+ * @ipv4: true when the layer 3 header is IPv4, false for IPv6
+ * @l4proto: layer 4 protocol to place in the IPv4 protocol / IPv6 next-header field
+ * @data: filter data
*
- * Returns 0 if the filters were successfully added or removed
+ * Returns the address at which the layer 4 header should be written.
+ **/
+static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ bool is_vlan = !!data->vlan_tag;
+ struct vlan_hdr vlan;
+ struct ipv6hdr ipv6;
+ struct ethhdr eth;
+ struct iphdr ip;
+ u8 *tmp;
+
+ if (ipv4) {
+ eth.h_proto = cpu_to_be16(ETH_P_IP);
+ ip.protocol = l4proto;
+ ip.version = 0x4;
+ ip.ihl = 0x5;
+
+ ip.daddr = data->dst_ip;
+ ip.saddr = data->src_ip;
+ } else {
+ eth.h_proto = cpu_to_be16(ETH_P_IPV6);
+ ipv6.nexthdr = l4proto;
+ ipv6.version = 0x6;
+
+ memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6,
+ sizeof(__be32) * 4);
+ memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6,
+ sizeof(__be32) * 4);
+ }
+
+ if (is_vlan) {
+ vlan.h_vlan_TCI = data->vlan_tag;
+ vlan.h_vlan_encapsulated_proto = eth.h_proto;
+ eth.h_proto = data->vlan_etype;
+ }
+
+ tmp = dummy_packet;
+ memcpy(tmp, &eth, sizeof(eth));
+ tmp += sizeof(eth);
+
+ if (is_vlan) {
+ memcpy(tmp, &vlan, sizeof(vlan));
+ tmp += sizeof(vlan);
+ }
+
+ if (ipv4) {
+ memcpy(tmp, &ip, sizeof(ip));
+ tmp += sizeof(ip);
+ } else {
+ memcpy(tmp, &ipv6, sizeof(ipv6));
+ tmp += sizeof(ipv6);
+ }
+
+ return tmp;
+}
+
+/**
+ * i40e_create_dummy_udp_packet - helper function to create UDP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: true when the layer 3 header is IPv4, false for IPv6
+ * @l4proto: next level protocol used in data portion of l3
+ * @data: filter data
+ *
+ * Helper function to populate udp fields.
**/
-static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static void i40e_create_dummy_udp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
{
- struct i40e_pf *pf = vsi->back;
struct udphdr *udp;
- struct iphdr *ip;
- u8 *raw_packet;
- int ret;
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ u8 *tmp;
- raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
- if (!raw_packet)
- return -ENOMEM;
- memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_UDP, data);
+ udp = (struct udphdr *)(tmp);
+ udp->dest = data->dst_port;
+ udp->source = data->src_port;
+}
+
+/**
+ * i40e_create_dummy_tcp_packet - helper function to create TCP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: true when the layer 3 header is IPv4, false for IPv6
+ * @l4proto: next level protocol used in data portion of l3
+ * @data: filter data
+ *
+ * Helper function to populate tcp fields.
+ **/
+static void i40e_create_dummy_tcp_packet(u8 *raw_packet, bool ipv4, u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ struct tcphdr *tcp;
+ u8 *tmp;
+ /* Dummy tcp packet */
+ static const char tcp_packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x50, 0x11, 0x0, 0x72, 0, 0, 0, 0};
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- udp = (struct udphdr *)(raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_TCP, data);
- ip->daddr = fd_data->dst_ip;
- udp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip;
- udp->source = fd_data->src_port;
+ tcp = (struct tcphdr *)tmp;
+ memcpy(tcp, tcp_packet, sizeof(tcp_packet));
+ tcp->dest = data->dst_port;
+ tcp->source = data->src_port;
+}
+
+/**
+ * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
+ * @raw_packet: preallocated space for dummy packet
+ * @ipv4: true when the layer 3 header is IPv4, false for IPv6
+ * @l4proto: next level protocol used in data portion of l3
+ * @data: filter data
+ *
+ * Helper function to populate sctp fields.
+ **/
+static void i40e_create_dummy_sctp_packet(u8 *raw_packet, bool ipv4,
+ u8 l4proto,
+ struct i40e_fdir_filter *data)
+{
+ struct sctphdr *sctp;
+ u8 *tmp;
+
+ tmp = i40e_create_dummy_packet(raw_packet, ipv4, IPPROTO_SCTP, data);
+
+ sctp = (struct sctphdr *)tmp;
+ sctp->dest = data->dst_port;
+ sctp->source = data->src_port;
+}
+
+/**
+ * i40e_prepare_fdir_filter - Prepare and program fdir filter
+ * @pf: physical function to attach filter to
+ * @fd_data: filter data
+ * @add: add or delete filter
+ * @packet_addr: address of dummy packet, used in filtering
+ * @payload_offset: offset from dummy packet address to user defined data
+ * @pctype: packet classification type the filter is programmed for
+ *
+ * Helper function to write the optional flex word into the dummy packet,
+ * program the filter and handle errors.
+ **/
+static int i40e_prepare_fdir_filter(struct i40e_pf *pf,
+ struct i40e_fdir_filter *fd_data,
+ bool add, char *packet_addr,
+ int payload_offset, u8 pctype)
+{
+ int ret;
if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_UDPIP_DUMMY_PACKET_LEN;
+ u8 *payload;
__be16 pattern = fd_data->flex_word;
u16 off = fd_data->flex_offset;
+ payload = packet_addr + payload_offset;
+
+ /* If user provided vlan, offset payload by vlan header length */
+ if (!!fd_data->vlan_tag)
+ payload += VLAN_HLEN;
+
*((__force __be16 *)(payload + off)) = pattern;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ fd_data->pctype = pctype;
+ ret = i40e_program_fdir_filter(fd_data, packet_addr, pf, add);
if (ret) {
dev_info(&pf->pdev->dev,
"PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
fd_data->pctype, fd_data->fd_id, ret);
/* Free the packet buffer since it wasn't added to the ring */
- kfree(raw_packet);
return -EOPNOTSUPP;
} else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
if (add)
@@ -225,238 +343,243 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
fd_data->pctype, fd_data->fd_id);
}
- if (add)
- pf->fd_udp4_filter_cnt++;
- else
- pf->fd_udp4_filter_cnt--;
+ return ret;
+}
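The flex-filter branch in i40e_prepare_fdir_filter() writes the user's 16-bit pattern into the dummy packet at payload_offset, shifted by VLAN_HLEN when a VLAN tag is present. A minimal standalone sketch of that pointer arithmetic; write_flex_word() and its parameters are illustrative names, not driver code:

    /* Hedged sketch: how a 16-bit flex pattern lands in the dummy packet.
     * payload_offset points just past the L4 header of the dummy frame and
     * off is the user-supplied offset within the payload.
     */
    #include <stdint.h>
    #include <string.h>

    #define VLAN_HLEN 4 /* the value the kernel macro expands to */

    static void write_flex_word(uint8_t *packet, int payload_offset,
                                int has_vlan, uint16_t off, uint16_t pattern_be)
    {
            uint8_t *payload = packet + payload_offset;

            if (has_vlan)   /* a VLAN tag shifts every later header by 4 bytes */
                    payload += VLAN_HLEN;

            memcpy(payload + off, &pattern_be, sizeof(pattern_be));
    }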
- return 0;
+/**
+ * i40e_change_filter_num - update the IPv4/IPv6 filter counters
+ * @ipv4: true when the filter is IPv4, false for IPv6
+ * @add: add or delete filter
+ * @ipv4_filter_num: field to update
+ * @ipv6_filter_num: field to update
+ *
+ * Update the PF's filter counter for the given IP version.
+ **/
+static void i40e_change_filter_num(bool ipv4, bool add, u16 *ipv4_filter_num,
+ u16 *ipv6_filter_num)
+{
+ if (add) {
+ if (ipv4)
+ (*ipv4_filter_num)++;
+ else
+ (*ipv6_filter_num)++;
+ } else {
+ if (ipv4)
+ (*ipv4_filter_num)--;
+ else
+ (*ipv6_filter_num)--;
+ }
}
-#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+#define IP_HEADER_OFFSET 14
+#define I40E_UDPIP_DUMMY_PACKET_LEN 42
+#define I40E_UDPIP6_DUMMY_PACKET_LEN 62
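These dummy-packet length constants, together with the TCP, SCTP and IP-only variants defined further down, are just stacked header sizes: 14-byte Ethernet, 20-byte IPv4 or 40-byte IPv6, then the L4 header, with no VLAN tag. A compile-time sanity check of that arithmetic, as a standalone C11 sketch:

    /* Hedged sketch: derive the dummy packet lengths from fixed header sizes
     * (Ethernet 14, IPv4 20, IPv6 40, UDP 8, TCP 20, SCTP common header 12).
     */
    #include <assert.h>

    static_assert(14 + 20 +  8 == 42, "I40E_UDPIP_DUMMY_PACKET_LEN");
    static_assert(14 + 40 +  8 == 62, "I40E_UDPIP6_DUMMY_PACKET_LEN");
    static_assert(14 + 20 + 20 == 54, "I40E_TCPIP_DUMMY_PACKET_LEN");
    static_assert(14 + 40 + 20 == 74, "I40E_TCPIP6_DUMMY_PACKET_LEN");
    static_assert(14 + 20 + 12 == 46, "I40E_SCTPIP_DUMMY_PACKET_LEN");
    static_assert(14 + 40 + 12 == 66, "I40E_SCTPIP6_DUMMY_PACKET_LEN");
    static_assert(14 + 20      == 34, "I40E_IP_DUMMY_PACKET_LEN");
    static_assert(14 + 40      == 54, "I40E_IP6_DUMMY_PACKET_LEN");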
/**
- * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
+ * i40e_add_del_fdir_udp - Add/Remove UDP filters
* @vsi: pointer to the targeted VSI
* @fd_data: the flow director data required for the FDir descriptor
* @add: true adds a filter, false removes it
+ * @ipv4: true for IPv4, false for IPv6
*
* Returns 0 if the filters were successfully added or removed
**/
-static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static int i40e_add_del_fdir_udp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
{
struct i40e_pf *pf = vsi->back;
- struct tcphdr *tcp;
- struct iphdr *ip;
u8 *raw_packet;
int ret;
- /* Dummy packet */
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x28, 0, 0, 0x40, 0, 0x40, 0x6, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
- 0x0, 0x72, 0, 0, 0, 0};
raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
if (!raw_packet)
return -ENOMEM;
- memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- tcp = (struct tcphdr *)(raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip;
- tcp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip;
- tcp->source = fd_data->src_port;
+ i40e_create_dummy_udp_packet(raw_packet, ipv4, IPPROTO_UDP, fd_data);
- if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_TCPIP_DUMMY_PACKET_LEN;
- __be16 pattern = fd_data->flex_word;
- u16 off = fd_data->flex_offset;
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_UDPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_UDPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
- *((__force __be16 *)(payload + off)) = pattern;
+ if (ret) {
+ kfree(raw_packet);
+ return ret;
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt,
+ &pf->fd_udp6_filter_cnt);
+
+ return 0;
+}
+
+#define I40E_TCPIP_DUMMY_PACKET_LEN 54
+#define I40E_TCPIP6_DUMMY_PACKET_LEN 74
+/**
+ * i40e_add_del_fdir_tcp - Add/Remove TCP filters
+ * @vsi: pointer to the targeted VSI
+ * @fd_data: the flow director data required for the FDir descriptor
+ * @add: true adds a filter, false removes it
+ * @ipv4: true for IPv4, false for IPv6
+ *
+ * Returns 0 if the filters were successfully added or removed
+ **/
+static int i40e_add_del_fdir_tcp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
+{
+ struct i40e_pf *pf = vsi->back;
+ u8 *raw_packet;
+ int ret;
+
+ raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+ if (!raw_packet)
+ return -ENOMEM;
+
+ i40e_create_dummy_tcp_packet(raw_packet, ipv4, IPPROTO_TCP, fd_data);
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_TCPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_TCPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+
if (ret) {
- dev_info(&pf->pdev->dev,
- "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
- fd_data->pctype, fd_data->fd_id, ret);
- /* Free the packet buffer since it wasn't added to the ring */
kfree(raw_packet);
- return -EOPNOTSUPP;
- } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
- if (add)
- dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n",
- fd_data->pctype, fd_data->fd_id);
- else
- dev_info(&pf->pdev->dev,
- "Filter deleted for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
+ return ret;
}
+ i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt,
+ &pf->fd_tcp6_filter_cnt);
+
if (add) {
- pf->fd_tcp4_filter_cnt++;
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
- } else {
- pf->fd_tcp4_filter_cnt--;
}
-
return 0;
}
-#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+#define I40E_SCTPIP_DUMMY_PACKET_LEN 46
+#define I40E_SCTPIP6_DUMMY_PACKET_LEN 66
/**
- * i40e_add_del_fdir_sctpv4 - Add/Remove SCTPv4 Flow Director filters for
+ * i40e_add_del_fdir_sctp - Add/Remove SCTP Flow Director filters for
* a specific flow spec
* @vsi: pointer to the targeted VSI
* @fd_data: the flow director data required for the FDir descriptor
* @add: true adds a filter, false removes it
+ * @ipv4: true for IPv4, false for IPv6
*
* Returns 0 if the filters were successfully added or removed
**/
-static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static int i40e_add_del_fdir_sctp(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
{
struct i40e_pf *pf = vsi->back;
- struct sctphdr *sctp;
- struct iphdr *ip;
u8 *raw_packet;
int ret;
- /* Dummy packet */
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x20, 0, 0, 0x40, 0, 0x40, 0x84, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
if (!raw_packet)
return -ENOMEM;
- memcpy(raw_packet, packet, I40E_SCTPIP_DUMMY_PACKET_LEN);
-
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
- sctp = (struct sctphdr *)(raw_packet + IP_HEADER_OFFSET
- + sizeof(struct iphdr));
- ip->daddr = fd_data->dst_ip;
- sctp->dest = fd_data->dst_port;
- ip->saddr = fd_data->src_ip;
- sctp->source = fd_data->src_port;
+ i40e_create_dummy_sctp_packet(raw_packet, ipv4, IPPROTO_SCTP, fd_data);
- if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_SCTPIP_DUMMY_PACKET_LEN;
- __be16 pattern = fd_data->flex_word;
- u16 off = fd_data->flex_offset;
-
- *((__force __be16 *)(payload + off)) = pattern;
- }
+ if (ipv4)
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_SCTPIP_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ else
+ ret = i40e_prepare_fdir_filter
+ (pf, fd_data, add, raw_packet,
+ I40E_SCTPIP6_DUMMY_PACKET_LEN,
+ I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
- dev_info(&pf->pdev->dev,
- "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
- fd_data->pctype, fd_data->fd_id, ret);
- /* Free the packet buffer since it wasn't added to the ring */
kfree(raw_packet);
- return -EOPNOTSUPP;
- } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
- if (add)
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
- else
- dev_info(&pf->pdev->dev,
- "Filter deleted for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
+ return ret;
}
- if (add)
- pf->fd_sctp4_filter_cnt++;
- else
- pf->fd_sctp4_filter_cnt--;
+ i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt,
+ &pf->fd_sctp6_filter_cnt);
return 0;
}
-#define I40E_IP_DUMMY_PACKET_LEN 34
+#define I40E_IP_DUMMY_PACKET_LEN 34
+#define I40E_IP6_DUMMY_PACKET_LEN 54
/**
- * i40e_add_del_fdir_ipv4 - Add/Remove IPv4 Flow Director filters for
+ * i40e_add_del_fdir_ip - Add/Remove IPv4/IPv6 Flow Director filters for
* a specific flow spec
* @vsi: pointer to the targeted VSI
* @fd_data: the flow director data required for the FDir descriptor
* @add: true adds a filter, false removes it
+ * @ipv4: true for IPv4, false for IPv6
*
* Returns 0 if the filters were successfully added or removed
**/
-static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
- struct i40e_fdir_filter *fd_data,
- bool add)
+static int i40e_add_del_fdir_ip(struct i40e_vsi *vsi,
+ struct i40e_fdir_filter *fd_data,
+ bool add,
+ bool ipv4)
{
struct i40e_pf *pf = vsi->back;
- struct iphdr *ip;
+ int payload_offset;
u8 *raw_packet;
+ int iter_start;
+ int iter_end;
int ret;
int i;
- static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
- 0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0};
- for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
- i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+ if (ipv4) {
+ iter_start = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
+ iter_end = I40E_FILTER_PCTYPE_FRAG_IPV4;
+ } else {
+ iter_start = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
+ iter_end = I40E_FILTER_PCTYPE_FRAG_IPV6;
+ }
+
+ for (i = iter_start; i <= iter_end; i++) {
raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
if (!raw_packet)
return -ENOMEM;
- memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
- ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
-
- ip->saddr = fd_data->src_ip;
- ip->daddr = fd_data->dst_ip;
- ip->protocol = 0;
- if (fd_data->flex_filter) {
- u8 *payload = raw_packet + I40E_IP_DUMMY_PACKET_LEN;
- __be16 pattern = fd_data->flex_word;
- u16 off = fd_data->flex_offset;
+ /* IPv6 uses IPPROTO_NONE ("no next header") rather than IPPROTO_IP */
+ (void)i40e_create_dummy_packet
+ (raw_packet, ipv4, (ipv4) ? IPPROTO_IP : IPPROTO_NONE,
+ fd_data);
- *((__force __be16 *)(payload + off)) = pattern;
- }
-
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
- if (ret) {
- dev_info(&pf->pdev->dev,
- "PCTYPE:%d, Filter command send failed for fd_id:%d (ret = %d)\n",
- fd_data->pctype, fd_data->fd_id, ret);
- /* The packet buffer wasn't added to the ring so we
- * need to free it now.
- */
- kfree(raw_packet);
- return -EOPNOTSUPP;
- } else if (I40E_DEBUG_FD & pf->hw.debug_mask) {
- if (add)
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
- else
- dev_info(&pf->pdev->dev,
- "Filter deleted for PCTYPE %d loc = %d\n",
- fd_data->pctype, fd_data->fd_id);
- }
+ payload_offset = (ipv4) ? I40E_IP_DUMMY_PACKET_LEN :
+ I40E_IP6_DUMMY_PACKET_LEN;
+ ret = i40e_prepare_fdir_filter(pf, fd_data, add, raw_packet,
+ payload_offset, i);
+ if (ret)
+ goto err;
}
- if (add)
- pf->fd_ip4_filter_cnt++;
- else
- pf->fd_ip4_filter_cnt--;
+ i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt,
+ &pf->fd_ip6_filter_cnt);
return 0;
+err:
+ kfree(raw_packet);
+ return ret;
}
/**
@@ -469,37 +592,68 @@ static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
int i40e_add_del_fdir(struct i40e_vsi *vsi,
struct i40e_fdir_filter *input, bool add)
{
+ enum ip_ver { ipv6 = 0, ipv4 = 1 };
struct i40e_pf *pf = vsi->back;
int ret;
switch (input->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
break;
case UDP_V4_FLOW:
- ret = i40e_add_del_fdir_udpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
break;
case SCTP_V4_FLOW:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
+ break;
+ case TCP_V6_FLOW:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
+ break;
+ case UDP_V6_FLOW:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
+ break;
+ case SCTP_V6_FLOW:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
break;
case IP_USER_FLOW:
- switch (input->ip4_proto) {
+ switch (input->ipl4_proto) {
case IPPROTO_TCP:
- ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv4);
break;
case IPPROTO_UDP:
- ret = i40e_add_del_fdir_udpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv4);
break;
case IPPROTO_SCTP:
- ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv4);
break;
case IPPROTO_IP:
- ret = i40e_add_del_fdir_ipv4(vsi, input, add);
+ ret = i40e_add_del_fdir_ip(vsi, input, add, ipv4);
break;
default:
/* We cannot support masking based on protocol */
dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n",
- input->ip4_proto);
+ input->ipl4_proto);
+ return -EINVAL;
+ }
+ break;
+ case IPV6_USER_FLOW:
+ switch (input->ipl4_proto) {
+ case IPPROTO_TCP:
+ ret = i40e_add_del_fdir_tcp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_UDP:
+ ret = i40e_add_del_fdir_udp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_SCTP:
+ ret = i40e_add_del_fdir_sctp(vsi, input, add, ipv6);
+ break;
+ case IPPROTO_IP:
+ ret = i40e_add_del_fdir_ip(vsi, input, add, ipv6);
+ break;
+ default:
+ /* We cannot support masking based on protocol */
+ dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n",
+ input->ipl4_proto);
return -EINVAL;
}
break;
@@ -1416,6 +1570,17 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset value for ring into the data buffer.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
+/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1443,6 +1608,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->rx_offset = i40e_rx_offset(rx_ring);
/* XDP RX-queue info only needed for RX rings exposed to XDP */
if (rx_ring->vsi->type == I40E_VSI_MAIN) {
@@ -1478,17 +1644,6 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
writel(val, rx_ring->tail);
}
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static inline unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
unsigned int size)
{
@@ -1497,8 +1652,8 @@ static unsigned int i40e_rx_frame_truesize(struct i40e_ring *rx_ring,
#if (PAGE_SIZE < 8192)
truesize = i40e_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
- truesize = i40e_rx_offset(rx_ring) ?
- SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring)) +
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(size + rx_ring->rx_offset) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
#endif
@@ -1549,7 +1704,7 @@ static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = i40e_rx_offset(rx_ring);
+ bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -1809,9 +1964,6 @@ void i40e_process_skb_fields(struct i40e_ring *rx_ring,
* @skb: pointer to current skb being fixed
* @rx_desc: pointer to the EOP Rx descriptor
*
- * Also address the case where we are pulling data in on pages only
- * and as such no data is present in the skb header.
- *
* In addition if skb is not at least 60 bytes we need to pad it so that
* it is large enough to qualify as a valid Ethernet frame.
*
@@ -1844,46 +1996,15 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb,
}
/**
- * i40e_page_is_reusable - check if any reuse is possible
- * @page: page struct to check
- *
- * A page is not reusable if it was allocated under low memory
- * conditions, or it's not in the same NUMA node as this CPU.
- */
-static inline bool i40e_page_is_reusable(struct page *page)
-{
- return (page_to_nid(page) == numa_mem_id()) &&
- !page_is_pfmemalloc(page);
-}
-
-/**
- * i40e_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
+ * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buffer: buffer containing the page
* @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
*
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
+ * If the page is reusable, we have a green light for calling i40e_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and the
+ * page freed.
+ */
static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
@@ -1891,7 +2012,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (unlikely(!i40e_page_is_reusable(page)))
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -1937,7 +2058,7 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
#if (PAGE_SIZE < 8192)
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size + i40e_rx_offset(rx_ring));
+ unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -2151,25 +2272,13 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
* i40e_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
*
- * This function updates next to clean. If the buffer is an EOP buffer
- * this function exits returning false, otherwise it will place the
- * sk_buff in the next buffer to be chained and return true indicating
- * that this is in fact a non-EOP buffer.
- **/
+ * If the buffer is an EOP buffer, this function returns false; otherwise
+ * it returns true, indicating that this is in fact a non-EOP buffer.
+ */
static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
- union i40e_rx_desc *rx_desc,
- struct sk_buff *skb)
+ union i40e_rx_desc *rx_desc)
{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-
/* if we are the last buffer then there is nothing else to do */
#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
@@ -2344,17 +2453,18 @@ static void i40e_inc_ntc(struct i40e_ring *rx_ring)
**/
static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
+ struct sk_buff *skb = rx_ring->skb;
unsigned int xdp_xmit = 0;
bool failure = false;
struct xdp_buff xdp;
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
+ frame_sz = i40e_rx_frame_truesize(rx_ring, 0);
#endif
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < (unsigned int)budget)) {
struct i40e_rx_buffer *rx_buffer;
@@ -2406,12 +2516,11 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- i40e_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size);
@@ -2448,7 +2557,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
cleaned_count++;
- if (i40e_is_non_eop(rx_ring, rx_desc, skb))
+ i40e_inc_ntc(rx_ring);
+ if (i40e_is_non_eop(rx_ring, rx_desc))
continue;
if (i40e_cleanup_headers(rx_ring, skb, rx_desc)) {
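The Rx-cleanup hunk above swaps the open-coded xdp_buff field setup for xdp_init_buff()/xdp_prepare_buff(), passing the cached rx_ring->rx_offset as headroom. A rough standalone sketch of what the prepare step amounts to; xdp_buff_sketch and prepare_buff_sketch are stand-in names, and the exact data_meta handling is an assumption rather than a quote of the kernel helper:

    /* Hedged sketch of xdp_prepare_buff()-style setup on a stand-in struct;
     * hard_start already has the headroom subtracted, as in the driver.
     */
    struct xdp_buff_sketch {
            unsigned char *data;
            unsigned char *data_end;
            unsigned char *data_meta;
            unsigned char *data_hard_start;
    };

    static void prepare_buff_sketch(struct xdp_buff_sketch *xdp,
                                    unsigned char *hard_start,
                                    int headroom, int data_len, int meta_valid)
    {
            xdp->data_hard_start = hard_start;
            xdp->data = hard_start + headroom;
            xdp->data_end = xdp->data + data_len;
            /* when metadata is valid, it starts where the packet data starts */
            xdp->data_meta = meta_valid ? xdp->data : xdp->data + 1;
    }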
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 5f531b195959..86fed05b4f19 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -387,6 +387,7 @@ struct i40e_ring {
*/
struct i40e_channel *ch;
+ u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
struct xdp_desc *xsk_descs; /* For storing descriptors in the AF_XDP ZC path */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index c0bdc666f557..5c10faaca790 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
+/* Copyright(c) 2013 - 2021 Intel Corporation. */
#ifndef _I40E_TYPE_H_
#define _I40E_TYPE_H_
@@ -517,6 +517,7 @@ struct i40e_dcbx_config {
#define I40E_DCBX_MODE_CEE 0x1
#define I40E_DCBX_MODE_IEEE 0x2
u8 app_mode;
+#define I40E_DCBX_APPS_NON_WILLING 0x1
u32 numapps;
u32 tlv_status; /* CEE mode TLV status */
struct i40e_dcb_ets_config etscfg;
@@ -1420,6 +1421,8 @@ struct i40e_lldp_variables {
#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
#define I40E_VERIFY_TAG_SHIFT 31
#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+#define I40E_VLAN_SRC_SHIFT 55
+#define I40E_VLAN_SRC_MASK (0x1ULL << I40E_VLAN_SRC_SHIFT)
#define I40E_FLEX_50_SHIFT 13
#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 492ce213208d..4f11f7bf75d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -215,9 +215,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
-
- count--;
- } while (count);
+ } while (--count);
no_buffers:
if (rx_ring->next_to_use != ntu) {
@@ -250,27 +248,68 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
xdp->data_end - xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
- return NULL;
+ goto out;
skb_reserve(skb, xdp->data - xdp->data_hard_start);
memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
+out:
xsk_buff_free(xdp);
return skb;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp_buff,
+ union i40e_rx_desc *rx_desc,
+ unsigned int *rx_packets,
+ unsigned int *rx_bytes,
+ unsigned int size,
+ unsigned int xdp_res)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct sk_buff *skb;
+
+ *rx_packets = 1;
+ *rx_bytes = size;
+
+ if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
+ return;
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
+ if (xdp_res == I40E_XDP_CONSUMED) {
+ xsk_buff_free(xdp_buff);
+ return;
+ }
+
+ if (xdp_res == I40E_XDP_PASS) {
+ /* NB! We are not checking for errors using
+ * i40e_test_staterr with
+ * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
+ * SBP is *not* set in PRT_SBPVSI (it is not set by default).
+ */
+ skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ if (eth_skb_pad(skb)) {
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ *rx_bytes = skb->len;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ return;
+ }
+
+ /* Should never get here; all valid cases have been handled already. */
+ WARN_ON_ONCE(1);
}
/**
@@ -284,17 +323,20 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ u16 next_to_clean = rx_ring->next_to_clean;
+ u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
- struct sk_buff *skb;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
- struct xdp_buff **bi;
+ unsigned int rx_packets;
+ unsigned int rx_bytes;
+ struct xdp_buff *bi;
unsigned int size;
u64 qword;
- rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
@@ -307,11 +349,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
- bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- xsk_buff_free(*bi);
- *bi = NULL;
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ xsk_buff_free(bi);
+ next_to_clean = (next_to_clean + 1) & count_mask;
continue;
}
@@ -320,53 +360,22 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!size)
break;
- bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- (*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
-
- xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
- if (xdp_res) {
- if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(*bi);
-
- *bi = NULL;
- total_rx_bytes += size;
- total_rx_packets++;
-
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
- continue;
- }
-
- /* XDP_PASS path */
-
- /* NB! We are not checking for errors using
- * i40e_test_staterr with
- * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
- * SBP is *not* set in PRT_SBPVSI (default not set).
- */
- skb = i40e_construct_skb_zc(rx_ring, *bi);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- break;
- }
-
- *bi = NULL;
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
-
- if (eth_skb_pad(skb))
- continue;
-
- total_rx_bytes += skb->len;
- total_rx_packets++;
-
- i40e_process_skb_fields(rx_ring, rx_desc, skb);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ bi->data_end = bi->data + size;
+ xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
+ &rx_bytes, size, xdp_res);
+ total_rx_packets += rx_packets;
+ total_rx_bytes += rx_bytes;
+ xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+ next_to_clean = (next_to_clean + 1) & count_mask;
}
+ rx_ring->next_to_clean = next_to_clean;
+ cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
+
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
@@ -374,7 +383,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
- if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ if (failure || next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
@@ -604,16 +613,14 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!rx_bi)
- continue;
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
- rx_bi = NULL;
}
}
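The zero-copy cleanup above drops i40e_inc_ntc() and instead advances next_to_clean with count_mask = rx_ring->count - 1, which only works when the ring size is a power of two. A standalone sketch of the wrap and of the refill-budget arithmetic used after the loop; the concrete ring size and indices are illustrative:

    /* Hedged sketch, assuming a power-of-two ring size. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint16_t count = 512;          /* must be a power of two */
            const uint16_t count_mask = count - 1;
            uint16_t next_to_clean = 510;
            uint16_t next_to_use = 100;

            /* (510 + 1) & 511 = 511, then (511 + 1) & 511 = 0: wraps without a branch */
            next_to_clean = (next_to_clean + 1) & count_mask;
            next_to_clean = (next_to_clean + 1) & count_mask;

            /* same arithmetic the cleanup loop uses for its refill budget */
            uint16_t cleaned_count = (next_to_clean - next_to_use - 1) & count_mask;

            printf("ntc=%u cleaned_count=%u\n", next_to_clean, cleaned_count);
            return 0;
    }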
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 256fa07d54d5..ffaf2742a2e0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1142,19 +1142,6 @@ static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
}
/**
- * iavf_page_is_reusable - check if any reuse is possible
- * @page: page struct to check
- *
- * A page is not reusable if it was allocated under low memory
- * conditions, or it's not in the same NUMA node as this CPU.
- */
-static inline bool iavf_page_is_reusable(struct page *page)
-{
- return (page_to_nid(page) == numa_mem_id()) &&
- !page_is_pfmemalloc(page);
-}
-
-/**
* iavf_can_reuse_rx_page - Determine if this page can be reused by
* the adapter for another receive
*
@@ -1187,7 +1174,7 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
struct page *page = rx_buffer->page;
/* Is any reuse possible? */
- if (unlikely(!iavf_page_is_reusable(page)))
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index ed08ace4f05a..647e7fde11b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -911,7 +911,7 @@ static void iavf_print_link_message(struct iavf_adapter *adapter)
return;
}
- speed = kcalloc(1, IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
+ speed = kzalloc(IAVF_MAX_SPEED_STRLEN, GFP_KERNEL);
if (!speed)
return;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 6da4f43f2348..73da4f71f530 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -24,6 +24,7 @@ ice-y := ice_main.o \
ice_flow.o \
ice_devlink.o \
ice_fw_update.o \
+ ice_lag.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fa1e128c24ec..dae8280ce17c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -39,6 +39,7 @@
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
@@ -55,6 +56,7 @@
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
+#include "ice_lag.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -166,7 +168,7 @@ struct ice_tc_cfg {
struct ice_res_tracker {
u16 num_entries;
u16 end;
- u16 list[1];
+ u16 list[];
};
struct ice_qs_cfg {
@@ -326,9 +328,11 @@ struct ice_vsi {
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- struct xsk_buff_pool **xsk_pools;
- u16 num_xsk_pools_used;
- u16 num_xsk_pools;
+
+ /* back reference to the aggregator node that this VSI
+ * corresponds to
+ */
+ struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -377,6 +381,13 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */
};
+struct ice_agg_node {
+ u32 agg_id;
+#define ICE_MAX_VSIS_IN_AGG_NODE 64
+ u32 num_vsis;
+ u8 valid;
+};
+
struct ice_pf {
struct pci_dev *pdev;
@@ -455,6 +466,15 @@ struct ice_pf {
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
+ struct ice_lag *lag; /* Link Aggregation information */
+
+#define ICE_INVALID_AGG_NODE_ID 0
+#define ICE_PF_AGG_NODE_ID_START 1
+#define ICE_MAX_PF_AGG_NODES 32
+ struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
+#define ICE_VF_AGG_NODE_ID_START 65
+#define ICE_MAX_VF_AGG_NODES 32
+ struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};
struct ice_netdev_priv {
@@ -517,17 +537,15 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
- struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
- !ice_is_xdp_ena_vsi(ring->vsi))
+ if (!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
- return pools[qid];
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}
/**
@@ -557,11 +575,31 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
return pf->vsi[pf->ctrl_vsi_idx];
}
+/**
+ * ice_set_sriov_cap - enable SRIOV in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_set_sriov_cap(struct ice_pf *pf)
+{
+ if (pf->hw.func_caps.common_cap.sr_iov_1_1)
+ set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+}
+
+/**
+ * ice_clear_sriov_cap - disable SRIOV in PF flags
+ * @pf: PF struct
+ */
+static inline void ice_clear_sriov_cap(struct ice_pf *pf)
+{
+ clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+}
+
#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
#define ICE_FD_STAT_PF_IDX(base_idx) \
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index b06fbe99d8e9..80186589153b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -695,6 +695,18 @@ struct ice_aqc_sched_elem_cmd {
__le32 addr_low;
};
+struct ice_aqc_txsched_move_grp_info_hdr {
+ __le32 src_parent_teid;
+ __le32 dest_parent_teid;
+ __le16 num_elems;
+ __le16 reserved;
+};
+
+struct ice_aqc_move_elem {
+ struct ice_aqc_txsched_move_grp_info_hdr hdr;
+ __le32 teid[];
+};
+
struct ice_aqc_elem_info_bw {
__le16 bw_profile_idx;
__le16 bw_alloc;
@@ -1334,33 +1346,6 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
-/* The result of netlist NVM read comes in a TLV format. The actual data
- * (netlist header) starts from word offset 1 (byte 2). The FW strips
- * out the type field from the TLV header so all the netlist fields
- * should adjust their offset value by 1 word (2 bytes) in order to map
- * their correct location.
- */
-#define ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID 0x11B
-#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET 1
-#define ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN 2 /* In bytes */
-#define ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET 2
-#define ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN 2 /* In bytes */
-#define ICE_AQC_NVM_NETLIST_NODE_COUNT_M ICE_M(0x3FF, 0)
-#define ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET 5
-#define ICE_AQC_NVM_NETLIST_ID_BLK_LEN 0x30 /* In words */
-
-/* netlist ID block field offsets (word offsets) */
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW 2
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH 3
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW 4
-#define ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH 5
-#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW 6
-#define ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH 7
-#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW 8
-#define ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH 9
-#define ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH 0xA
-#define ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER 0x2F
-
/* Used for NVM Set Package Data command - 0x070A */
struct ice_aqc_nvm_pkg_data {
u8 reserved[3];
@@ -1555,6 +1540,16 @@ struct ice_aqc_lldp_stop_start_specific_agent {
u8 reserved[15];
};
+/* LLDP Filter Control (direct 0x0A0A) */
+struct ice_aqc_lldp_filter_ctrl {
+ u8 cmd_flags;
+#define ICE_AQC_LLDP_FILTER_ACTION_ADD 0x0
+#define ICE_AQC_LLDP_FILTER_ACTION_DELETE 0x1
+ u8 reserved1;
+ __le16 vsi_num;
+ u8 reserved2[12];
+};
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
@@ -1878,6 +1873,7 @@ struct ice_aq_desc {
struct ice_aqc_lldp_start lldp_start;
struct ice_aqc_lldp_set_local_mib lldp_set_mib;
struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
+ struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
struct ice_aqc_add_txqs add_txqs;
@@ -1977,6 +1973,7 @@ enum ice_adminq_opc {
ice_aqc_opc_add_sched_elems = 0x0401,
ice_aqc_opc_cfg_sched_elems = 0x0403,
ice_aqc_opc_get_sched_elems = 0x0404,
+ ice_aqc_opc_move_sched_elems = 0x0408,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
@@ -2018,6 +2015,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
+ ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 6d7e7dd0ebe2..3d9475e222cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -110,7 +110,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
if (status)
return status;
- resp = (struct ice_aqc_manage_mac_read_resp *)buf;
+ resp = buf;
flags = le16_to_cpu(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
@@ -907,6 +907,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
goto err_unroll_alloc;
}
+ ice_sched_get_psm_clk_freq(hw);
/* Initialize port_info struct with scheduler data */
status = ice_sched_init_port(hw->port_info);
@@ -1653,7 +1654,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
if (!buf)
return ICE_ERR_PARAM;
- if (buf_size < (num_entries * sizeof(buf->elem[0])))
+ if (buf_size < flex_array_size(buf, elem, num_entries))
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
@@ -1979,7 +1980,7 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
struct ice_aqc_list_caps_elem *cap_resp;
u32 i;
- cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+ cap_resp = buf;
memset(func_p, 0, sizeof(*func_p));
@@ -2109,7 +2110,7 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
struct ice_aqc_list_caps_elem *cap_resp;
u32 i;
- cap_resp = (struct ice_aqc_list_caps_elem *)buf;
+ cap_resp = buf;
memset(dev_p, 0, sizeof(*dev_p));
@@ -4078,6 +4079,7 @@ static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
for (i = 0; i < ICE_SW_LKUP_LAST; i++)
list_replace_init(&sw->recp_list[i].filt_rules,
&sw->recp_list[i].filt_replay_rules);
+ ice_sched_replay_agg_vsi_preinit(hw);
return 0;
}
@@ -4109,6 +4111,8 @@ enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
return status;
/* Replay per VSI all filters */
status = ice_replay_vsi_all_fltr(hw, vsi_handle);
+ if (!status)
+ status = ice_replay_vsi_agg(hw, vsi_handle);
return status;
}
@@ -4122,6 +4126,7 @@ void ice_replay_post(struct ice_hw *hw)
{
/* Delete old entries from replay filter list head */
ice_rm_all_sw_replay_rule_info(hw);
+ ice_sched_replay_agg(hw);
}
/**
@@ -4366,3 +4371,50 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
+
+/**
+ * ice_fw_supports_lldp_fltr_ctrl - check if the FW API version supports lldp_fltr_ctrl
+ * @hw: pointer to HW struct
+ */
+bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
+{
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
+ if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
+ return true;
+ if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
+ hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
+ return true;
+ } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
+ return true;
+ }
+ return false;
+}
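ice_fw_supports_lldp_fltr_ctrl() gates the new AQ command on a minimum (major, minor, patch) firmware API version for E810 devices. The nested ifs reduce to an ordered tuple comparison; api_ver_ge() below is an illustrative standalone sketch, and the threshold arguments are placeholders rather than the real ICE_FW_API_LLDP_FLTR_* constants:

    /* Hedged sketch: "is (maj, min, patch) >= threshold", the comparison the
     * driver spells out with nested ifs.
     */
    #include <stdbool.h>

    static bool api_ver_ge(unsigned int maj, unsigned int min, unsigned int patch,
                           unsigned int t_maj, unsigned int t_min,
                           unsigned int t_patch)
    {
            if (maj != t_maj)
                    return maj > t_maj;
            if (min != t_min)
                    return min > t_min;
            return patch >= t_patch;
    }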
+
+/**
+ * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
+ * @hw: pointer to HW struct
+ * @vsi_num: absolute HW index for VSI
+ * @add: boolean for if adding or removing a filter
+ */
+enum ice_status
+ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+{
+ struct ice_aqc_lldp_filter_ctrl *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.lldp_filter_ctrl;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
+
+ if (add)
+ cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
+ else
+ cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
+
+ cmd->vsi_num = cpu_to_le16(vsi_num);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
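ice_lldp_fltr_add_remove() sends a direct (buffer-less) admin queue command whose whole payload is the params block declared earlier as struct ice_aqc_lldp_filter_ctrl. A standalone sketch of that layout with plain C stand-ins for the kernel types; the 16-byte params size is inferred from the other direct commands in this header and should be treated as an assumption:

    /* Hedged sketch: 1 + 1 + 2 + 12 = 16 bytes, filling the descriptor's
     * parameter area; cmd_flags selects add (0x0) or delete (0x1).
     */
    #include <assert.h>
    #include <stdint.h>

    struct lldp_filter_ctrl_sketch {
            uint8_t  cmd_flags;     /* ICE_AQC_LLDP_FILTER_ACTION_ADD/DELETE */
            uint8_t  reserved1;
            uint16_t vsi_num;       /* little-endian absolute VSI number */
            uint8_t  reserved2[12];
    };

    static_assert(sizeof(struct lldp_filter_ctrl_sketch) == 16,
                  "params block should span the 16-byte descriptor payload");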
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 3ebb973878c7..baf4064fcbfe 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -175,4 +175,7 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
+bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
+enum ice_status
+ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 4db12d1f5808..b2d8a5932b1d 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -838,7 +838,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*/
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
- struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
+ struct ice_aq_desc *cq_desc = desc;
u16 len;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
@@ -868,7 +868,7 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
if (buf_len < len)
len = buf_len;
- ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
+ ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 2a3147ee0bbb..e42727941ef5 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -850,9 +850,9 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
return ICE_ERR_PARAM;
if (dcbx_mode == ICE_DCBX_MODE_IEEE)
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
else if (dcbx_mode == ICE_DCBX_MODE_CEE)
- dcbx_cfg = &pi->desired_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.desired_dcbx_cfg;
/* Get Local DCB Config in case of ICE_DCBX_MODE_IEEE
* or get CEE DCB Desired Config in case of ICE_DCBX_MODE_CEE
@@ -863,7 +863,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
goto out;
/* Get Remote DCB Config */
- dcbx_cfg = &pi->remote_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
@@ -892,14 +892,14 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
if (!ret) {
/* CEE mode */
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_CEE;
dcbx_cfg->tlv_status = le32_to_cpu(cee_cfg.tlv_status);
ice_cee_to_dcb_cfg(&cee_cfg, dcbx_cfg);
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
} else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_IEEE);
}
@@ -916,26 +916,26 @@ enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
*/
enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
- struct ice_port_info *pi = hw->port_info;
+ struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret = 0;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
- pi->is_sw_lldp = true;
+ qos_cfg->is_sw_lldp = true;
/* Get DCBX status */
- pi->dcbx_status = ice_get_dcbx_status(hw);
+ qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DONE ||
- pi->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
- pi->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
+ if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DONE ||
+ qos_cfg->dcbx_status == ICE_DCBX_STATUS_IN_PROGRESS ||
+ qos_cfg->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
/* Get current DCBX configuration */
- ret = ice_get_dcb_cfg(pi);
+ ret = ice_get_dcb_cfg(hw->port_info);
if (ret)
return ret;
- pi->is_sw_lldp = false;
- } else if (pi->dcbx_status == ICE_DCBX_STATUS_DIS) {
+ qos_cfg->is_sw_lldp = false;
+ } else if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS) {
return ICE_ERR_NOT_READY;
}
@@ -943,7 +943,7 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
if (enable_mib_change) {
ret = ice_aq_cfg_lldp_mib_change(hw, true, NULL);
if (ret)
- pi->is_sw_lldp = true;
+ qos_cfg->is_sw_lldp = true;
}
return ret;
@@ -958,21 +958,21 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*/
enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
- struct ice_port_info *pi = hw->port_info;
+ struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
enum ice_status ret;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
/* Get DCBX status */
- pi->dcbx_status = ice_get_dcbx_status(hw);
+ qos_cfg->dcbx_status = ice_get_dcbx_status(hw);
- if (pi->dcbx_status == ICE_DCBX_STATUS_DIS)
+ if (qos_cfg->dcbx_status == ICE_DCBX_STATUS_DIS)
return ICE_ERR_NOT_READY;
ret = ice_aq_cfg_lldp_mib_change(hw, ena_mib, NULL);
if (!ret)
- pi->is_sw_lldp = !ena_mib;
+ qos_cfg->is_sw_lldp = !ena_mib;
return ret;
}
@@ -1270,7 +1270,7 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
hw = pi->hw;
/* update the HW local config */
- dcbcfg = &pi->local_dcbx_cfg;
+ dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
/* Allocate the LLDPDU */
lldpmib = devm_kzalloc(ice_hw_to_dev(hw), ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib)
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 36abd6b7280c..1e8f71ffc8ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -28,7 +28,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
- dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
@@ -134,7 +134,7 @@ static u8 ice_dcb_get_mode(struct ice_port_info *port_info, bool host)
else
mode = DCB_CAP_DCBX_LLD_MANAGED;
- if (port_info->local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
+ if (port_info->qos_cfg.local_dcbx_cfg.dcbx_mode & ICE_DCBX_MODE_CEE)
return mode | DCB_CAP_DCBX_VER_CEE;
else
return mode | DCB_CAP_DCBX_VER_IEEE;
@@ -277,10 +277,10 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
int ret = ICE_DCB_NO_HW_CHG;
struct ice_vsi *pf_vsi;
- curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
/* FW does not care if change happened */
- if (!pf->hw.port_info->is_sw_lldp)
+ if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
ret = ICE_DCB_HW_CHG_RST;
/* Enable DCB tagging only when more than one TC */
@@ -327,7 +327,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
*/
- if (pf->hw.port_info->is_sw_lldp) {
+ if (pf->hw.port_info->qos_cfg.is_sw_lldp) {
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(dev, "Set DCB Config failed\n");
@@ -360,7 +360,7 @@ free_cfg:
*/
static void ice_cfg_etsrec_defaults(struct ice_port_info *pi)
{
- struct ice_dcbx_cfg *dcbcfg = &pi->local_dcbx_cfg;
+ struct ice_dcbx_cfg *dcbcfg = &pi->qos_cfg.local_dcbx_cfg;
u8 i;
/* Ensure ETS recommended DCB configuration is not already set */
@@ -446,7 +446,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
mutex_lock(&pf->tc_mutex);
- if (!pf->hw.port_info->is_sw_lldp)
+ if (!pf->hw.port_info->qos_cfg.is_sw_lldp)
ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
@@ -455,9 +455,9 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
- if (!pf->hw.port_info->is_sw_lldp) {
+ if (!pf->hw.port_info->qos_cfg.is_sw_lldp) {
ret = ice_cfg_lldp_mib_change(&pf->hw, true);
- if (ret && !pf->hw.port_info->is_sw_lldp) {
+ if (ret && !pf->hw.port_info->qos_cfg.is_sw_lldp) {
dev_err(dev, "Failed to register for MIB changes\n");
goto dcb_error;
}
@@ -510,11 +510,12 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
int ret = 0;
pi = pf->hw.port_info;
- newcfg = kmemdup(&pi->local_dcbx_cfg, sizeof(*newcfg), GFP_KERNEL);
+ newcfg = kmemdup(&pi->qos_cfg.local_dcbx_cfg, sizeof(*newcfg),
+ GFP_KERNEL);
if (!newcfg)
return -ENOMEM;
- memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
+ memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*newcfg));
dev_info(ice_pf_to_dev(pf), "Configuring initial DCB values\n");
if (ice_pf_dcb_cfg(pf, newcfg, locked))
@@ -545,7 +546,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
if (!dcbcfg)
return -ENOMEM;
- memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));
+ memset(&pi->qos_cfg.local_dcbx_cfg, 0, sizeof(*dcbcfg));
dcbcfg->etscfg.willing = ets_willing ? 1 : 0;
dcbcfg->etscfg.maxtcs = hw->func_caps.common_cap.maxtc;
@@ -608,7 +609,7 @@ static bool ice_dcb_tc_contig(u8 *prio_table)
*/
static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
{
- struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct device *dev = ice_pf_to_dev(pf);
int ret;
@@ -638,7 +639,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
*/
void ice_pf_dcb_recfg(struct ice_pf *pf)
{
- struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+ struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
u8 tc_map = 0;
int v, ret;
@@ -691,7 +692,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
port_info = hw->port_info;
err = ice_init_dcb(hw, false);
- if (err && !port_info->is_sw_lldp) {
+ if (err && !port_info->qos_cfg.is_sw_lldp) {
dev_err(dev, "Error initializing DCB %d\n", err);
goto dcb_init_err;
}
@@ -858,7 +859,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
- &pi->remote_dcbx_cfg);
+ &pi->qos_cfg.remote_dcbx_cfg);
if (ret) {
dev_err(dev, "Failed to get remote DCB config\n");
return;
@@ -868,10 +869,11 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
mutex_lock(&pf->tc_mutex);
/* store the old configuration */
- tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
+ tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */
- memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
+ memset(&pi->qos_cfg.local_dcbx_cfg, 0,
+ sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */
ret = ice_get_dcb_cfg(pf->hw.port_info);
@@ -881,7 +883,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
/* No change detected in DCBX configs */
- if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
+ if (!memcmp(&tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg,
+ sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n");
goto out;
}
@@ -889,13 +892,13 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pf->dcbx_cap = ice_dcb_get_mode(pi, false);
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
- &pi->local_dcbx_cfg);
- ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
+ &pi->qos_cfg.local_dcbx_cfg);
+ ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->qos_cfg.local_dcbx_cfg);
if (!need_reconfig)
goto out;
/* Enable DCB tagging only when more than one TC */
- if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
+ if (ice_dcb_get_num_tc(&pi->qos_cfg.local_dcbx_cfg) > 1) {
dev_dbg(dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 87f91b750d59..fcfefad00d1c 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -34,12 +34,10 @@ static void ice_dcbnl_devreset(struct net_device *netdev)
static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
{
struct ice_dcbx_cfg *dcbxcfg;
- struct ice_port_info *pi;
struct ice_pf *pf;
pf = ice_netdev_to_pf(netdev);
- pi = pf->hw.port_info;
- dcbxcfg = &pi->local_dcbx_cfg;
+ dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
ets->willing = dcbxcfg->etscfg.willing;
ets->ets_cap = dcbxcfg->etscfg.maxtcs;
@@ -74,7 +72,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -159,6 +157,7 @@ static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_qos_cfg *qos_cfg;
/* No support for LLD_MANAGED modes or CEE+IEEE */
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -171,10 +170,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
return ICE_DCB_NO_HW_CHG;
pf->dcbx_cap = mode;
+ qos_cfg = &pf->hw.port_info->qos_cfg;
if (mode & DCB_CAP_DCBX_VER_CEE)
- pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
+ qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
else
- pf->hw.port_info->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+ qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
@@ -225,7 +225,7 @@ static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
struct ice_dcbx_cfg *dcbxcfg;
int i;
- dcbxcfg = &pi->local_dcbx_cfg;
+ dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
pfc->pfc_cap = dcbxcfg->pfc.pfccap;
pfc->pfc_en = dcbxcfg->pfc.pfcena;
pfc->mbc = dcbxcfg->pfc.mbc;
@@ -256,7 +256,7 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
mutex_lock(&pf->tc_mutex);
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
if (pfc->pfc_cap)
new_cfg->pfc.pfccap = pfc->pfc_cap;
@@ -293,9 +293,9 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- *setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
+ *setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
- prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
+ prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena);
}
/**
@@ -316,7 +316,7 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
if (set)
@@ -338,7 +338,7 @@ static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
struct ice_port_info *pi = pf->hw.port_info;
/* Return enabled if any UP enabled for PFC */
- if (pi->local_dcbx_cfg.pfc.pfcena)
+ if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena)
return 1;
return 0;
@@ -378,8 +378,8 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
if (state) {
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
- memcpy(&pf->hw.port_info->desired_dcbx_cfg,
- &pf->hw.port_info->local_dcbx_cfg,
+ memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
+ &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
sizeof(struct ice_dcbx_cfg));
} else {
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
@@ -413,7 +413,7 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+ *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
*pgid);
}
@@ -444,7 +444,7 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
/* prio_type, bwg_id and bw_pct per UP are not supported */
@@ -474,7 +474,7 @@ ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
if (pgid >= ICE_MAX_TRAFFIC_CLASS)
return;
- *bw_pct = pi->local_dcbx_cfg.etscfg.tcbwtable[pgid];
+ *bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid];
dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
pgid, *bw_pct);
}
@@ -498,7 +498,7 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
if (pgid >= ICE_MAX_TRAFFIC_CLASS)
return;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
}
@@ -528,7 +528,7 @@ ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
if (prio >= ICE_MAX_USER_PRIORITY)
return;
- *pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
+ *pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
}
/**
@@ -699,9 +699,9 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
mutex_lock(&pf->tc_mutex);
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
- old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
ret = -EINVAL;
@@ -751,7 +751,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
mutex_lock(&pf->tc_mutex);
- old_cfg = &pf->hw.port_info->local_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
if (old_cfg->numapps <= 1)
goto delapp_out;
@@ -760,7 +760,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
if (ret)
goto delapp_out;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
for (i = 1; i < new_cfg->numapps; i++) {
if (app->selector == new_cfg->app[i].selector &&
@@ -813,7 +813,7 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
- new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -884,7 +884,7 @@ void ice_dcbnl_set_all(struct ice_vsi *vsi)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
- dcbxcfg = &pi->local_dcbx_cfg;
+ dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
for (i = 0; i < dcbxcfg->numapps; i++) {
u8 prio, tc_map;
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 29d6192b15f3..cf685eeea198 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -6,132 +6,226 @@
#include "ice_devlink.h"
#include "ice_fw_update.h"
-static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+/* context for devlink info version reporting */
+struct ice_info_ctx {
+ char buf[128];
+ struct ice_orom_info pending_orom;
+ struct ice_nvm_info pending_nvm;
+ struct ice_netlist_info pending_netlist;
+ struct ice_hw_dev_caps dev_caps;
+};
+
+/* The following functions are used to format specific strings for various
+ * devlink info versions. The ctx parameter is used to provide the storage
+ * buffer, as well as any ancillary information calculated when the info
+ * request was made.
+ *
+ * If a version does not exist, for example when attempting to get the
+ * inactive version of flash when there is no pending update, the function
+ * should leave the buffer in the ctx structure empty and return 0.
+ */
+
+static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
u8 dsn[8];
/* Copy the DSN into an array in Big Endian format */
put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
- snprintf(buf, len, "%8phD", dsn);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}
-static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
enum ice_status status;
- status = ice_read_pba_string(hw, (u8 *)buf, len);
+ status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf));
if (status)
return -EIO;
return 0;
}
-static int ice_info_fw_mgmt(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
hw->fw_patch);
return 0;
}
-static int ice_info_fw_api(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "%u.%u", hw->api_maj_ver, hw->api_min_ver);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
return 0;
}
-static int ice_info_fw_build(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "0x%08x", hw->fw_build);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
+
+ return 0;
+}
+
+static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_orom_info *orom = &pf->hw.flash.orom;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch);
+
+ return 0;
+}
+
+static int
+ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_orom_info *orom = &ctx->pending_orom;
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
return 0;
}
-static int ice_info_orom_ver(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- struct ice_orom_info *orom = &pf->hw.nvm.orom;
+ struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
- snprintf(buf, len, "%u.%u.%u", orom->major, orom->build, orom->patch);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
return 0;
}
-static int ice_info_nvm_ver(struct ice_pf *pf, char *buf, size_t len)
+static int
+ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
{
- struct ice_nvm_info *nvm = &pf->hw.nvm;
+ struct ice_nvm_info *nvm = &ctx->pending_nvm;
- snprintf(buf, len, "%x.%02x", nvm->major_ver, nvm->minor_ver);
+ if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
return 0;
}
-static int ice_info_eetrack(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- struct ice_nvm_info *nvm = &pf->hw.nvm;
+ struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
- snprintf(buf, len, "0x%08x", nvm->eetrack);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
return 0;
}
-static int ice_info_ddp_pkg_name(struct ice_pf *pf, char *buf, size_t len)
+static int
+ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_nvm_info *nvm = &ctx->pending_nvm;
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
+
+ return 0;
+}
+
+static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(buf, len, "%s", hw->active_pkg_name);
+ snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
return 0;
}
-static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
- snprintf(buf, len, "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
pkg->draft);
return 0;
}
-static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- snprintf(buf, len, "0x%08x", pf->hw.active_track_id);
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
return 0;
}
-static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
- struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+ struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
/* The netlist version fields are BCD formatted */
- snprintf(buf, len, "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
netlist->cust_ver);
return 0;
}
-static int ice_info_netlist_build(struct ice_pf *pf, char *buf, size_t len)
+static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
+
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+
+ return 0;
+}
+
+static int
+ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
{
- struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
+ struct ice_netlist_info *netlist = &ctx->pending_netlist;
- snprintf(buf, len, "0x%08x", netlist->hash);
+ /* The netlist version fields are BCD formatted */
+ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
+ netlist->cust_ver);
return 0;
}
-#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter }
-#define running(key, getter) { ICE_VERSION_RUNNING, key, getter }
+static int
+ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+{
+ struct ice_netlist_info *netlist = &ctx->pending_netlist;
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
+ snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
+
+ return 0;
+}
+
+#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
+#define running(key, getter) { ICE_VERSION_RUNNING, key, getter, NULL }
+#define stored(key, getter, fallback) { ICE_VERSION_STORED, key, getter, fallback }
+
+/* The combined() macro inserts both the running entry as well as a stored
+ * entry. The running entry will always report the version from the active
+ * handler. The stored entry will first try the pending handler, and fallback
+ * to the active handler if the pending function does not report a version.
+ * The pending handler should check the status of a pending update for the
+ * relevant flash component. It should only fill in the buffer in the case
+ * where a valid pending version is available. This ensures that the related
+ * stored and running versions remain in sync, and that stored versions are
+ * correctly reported as expected.
+ */
+#define combined(key, active, pending) \
+ running(key, active), \
+ stored(key, pending, active)
enum ice_version_type {
ICE_VERSION_FIXED,
@@ -142,20 +236,21 @@ enum ice_version_type {
static const struct ice_devlink_version {
enum ice_version_type type;
const char *key;
- int (*getter)(struct ice_pf *pf, char *buf, size_t len);
+ int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
running("fw.mgmt.api", ice_info_fw_api),
running("fw.mgmt.build", ice_info_fw_build),
- running(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver),
- running("fw.psid.api", ice_info_nvm_ver),
- running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
+ combined(DEVLINK_INFO_VERSION_GENERIC_FW_UNDI, ice_info_orom_ver, ice_info_pending_orom_ver),
+ combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver),
+ combined(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack, ice_info_pending_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
- running("fw.netlist", ice_info_netlist_ver),
- running("fw.netlist.build", ice_info_netlist_build),
+ combined("fw.netlist", ice_info_netlist_ver, ice_info_pending_netlist_ver),
+ combined("fw.netlist.build", ice_info_netlist_build, ice_info_pending_netlist_build),
};
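
The combined() macro above only stitches two adjacent initializers into this table. Assuming the four-field layout of struct ice_devlink_version declared above (type, key, getter, fallback), one entry expands roughly as shown below; this is an illustrative expansion, not code from the patch.

	combined("fw.psid.api", ice_info_nvm_ver, ice_info_pending_nvm_ver)

	/* after running() and stored() are applied: */
	{ ICE_VERSION_RUNNING, "fw.psid.api", ice_info_nvm_ver, NULL },
	{ ICE_VERSION_STORED, "fw.psid.api", ice_info_pending_nvm_ver,
	  ice_info_nvm_ver },

The "stored" row therefore tries the pending getter first and, because that getter leaves ctx->buf empty when no update is staged, ice_devlink_info_get() falls back to the running getter through the fallback pointer.
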
/**
@@ -174,60 +269,128 @@ static int ice_devlink_info_get(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
- char buf[100];
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct ice_info_ctx *ctx;
+ enum ice_status status;
size_t i;
int err;
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* discover capabilities first */
+ status = ice_discover_dev_caps(hw, &ctx->dev_caps);
+ if (status) {
+ err = -EIO;
+ goto out_free_ctx;
+ }
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_orom) {
+ status = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
+ if (status) {
+ dev_dbg(dev, "Unable to read inactive Option ROM version data, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+
+ /* disable display of pending Option ROM */
+ ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
+ }
+ }
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_nvm) {
+ status = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
+ if (status) {
+ dev_dbg(dev, "Unable to read inactive NVM version data, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+
+			/* disable display of pending NVM */
+ ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
+ }
+ }
+
+ if (ctx->dev_caps.common_cap.nvm_update_pending_netlist) {
+ status = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
+ if (status) {
+ dev_dbg(dev, "Unable to read inactive Netlist version data, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+
+			/* disable display of pending Netlist */
+ ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
+ }
+ }
+
err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set driver name");
- return err;
+ goto out_free_ctx;
}
- ice_info_get_dsn(pf, buf, sizeof(buf));
+ ice_info_get_dsn(pf, ctx);
- err = devlink_info_serial_number_put(req, buf);
+ err = devlink_info_serial_number_put(req, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set serial number");
- return err;
+ goto out_free_ctx;
}
for (i = 0; i < ARRAY_SIZE(ice_devlink_versions); i++) {
enum ice_version_type type = ice_devlink_versions[i].type;
const char *key = ice_devlink_versions[i].key;
- err = ice_devlink_versions[i].getter(pf, buf, sizeof(buf));
+ memset(ctx->buf, 0, sizeof(ctx->buf));
+
+ err = ice_devlink_versions[i].getter(pf, ctx);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- return err;
+ goto out_free_ctx;
+ }
+
+ /* If the default getter doesn't report a version, use the
+ * fallback function. This is primarily useful in the case of
+ * "stored" versions that want to report the same value as the
+ * running version in the normal case of no pending update.
+ */
+ if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) {
+ err = ice_devlink_versions[i].fallback(pf, ctx);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
+ goto out_free_ctx;
+ }
}
+ /* Do not report missing versions */
+ if (ctx->buf[0] == '\0')
+ continue;
+
switch (type) {
case ICE_VERSION_FIXED:
- err = devlink_info_version_fixed_put(req, key, buf);
+ err = devlink_info_version_fixed_put(req, key, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set fixed version");
- return err;
+ goto out_free_ctx;
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, buf);
+ err = devlink_info_version_running_put(req, key, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
- return err;
+ goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, buf);
+ err = devlink_info_version_stored_put(req, key, ctx->buf);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
- return err;
+ goto out_free_ctx;
}
break;
}
}
- return 0;
+out_free_ctx:
+ kfree(ctx);
+ return err;
}
/**
@@ -433,7 +596,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
void *nvm_data;
u32 nvm_size;
- nvm_size = hw->nvm.flash_size;
+ nvm_size = hw->flash.flash_size;
nvm_data = vzalloc(nvm_size);
if (!nvm_data)
return -ENOMEM;
@@ -533,7 +696,7 @@ void ice_devlink_init_regions(struct ice_pf *pf)
struct device *dev = ice_pf_to_dev(pf);
u64 nvm_size;
- nvm_size = pf->hw.nvm.flash_size;
+ nvm_size = pf->hw.flash.flash_size;
pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
nvm_size);
if (IS_ERR(pf->nvm_region)) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 69c113a4de7e..5636c9b23896 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -179,8 +179,8 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct ice_orom_info *orom;
struct ice_nvm_info *nvm;
- nvm = &hw->nvm;
- orom = &nvm->orom;
+ nvm = &hw->flash.nvm;
+ orom = &hw->flash.orom;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
@@ -188,7 +188,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
* determined) which contains more pertinent information.
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%x.%02x 0x%x %d.%d.%d", nvm->major_ver, nvm->minor_ver,
+ "%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
nvm->eetrack, orom->major, orom->build, orom->patch);
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
@@ -250,7 +250,7 @@ static int ice_get_eeprom_len(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
- return (int)pf->hw.nvm.flash_size;
+ return (int)pf->hw.flash.flash_size;
}
static int
@@ -1242,6 +1242,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
enum ice_status status;
bool dcbx_agent_status;
+ /* Remove rule to direct LLDP packets to default VSI.
+ * The FW LLDP engine will now be consuming them.
+ */
+ ice_cfg_sw_lldp(vsi, false, false);
+
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
*/
@@ -1270,11 +1275,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (status)
dev_dbg(dev, "Fail to init DCB\n");
- /* Remove rule to direct LLDP packets to default VSI.
- * The FW LLDP engine will now be consuming them.
- */
- ice_cfg_sw_lldp(vsi, false, false);
-
/* Register for MIB change events */
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
@@ -2979,7 +2979,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pause->rx_pause = 0;
pause->tx_pause = 0;
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
@@ -3031,7 +3031,7 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
- dcbx_cfg = &pi->local_dcbx_cfg;
+ dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* Changing the port's flow control is not supported if this isn't the
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index f5e81b555353..5e1fd30c0a0f 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -1525,7 +1525,7 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
bld->reserved_section_table_entries += count;
data_end = le16_to_cpu(buf->data_end) +
- (count * sizeof(buf->section_entry[0]));
+ flex_array_size(buf, section_entry, count);
buf->data_end = cpu_to_le16(data_end);
return 0;
@@ -2727,7 +2727,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_XLT1_RSS:
case ICE_SID_XLT1_ACL:
case ICE_SID_XLT1_PE:
- xlt1 = (struct ice_xlt1_section *)sect;
+ xlt1 = sect;
src = xlt1->value;
sect_len = le16_to_cpu(xlt1->count) *
sizeof(*hw->blk[block_id].xlt1.t);
@@ -2740,7 +2740,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_XLT2_RSS:
case ICE_SID_XLT2_ACL:
case ICE_SID_XLT2_PE:
- xlt2 = (struct ice_xlt2_section *)sect;
+ xlt2 = sect;
src = (__force u8 *)xlt2->value;
sect_len = le16_to_cpu(xlt2->count) *
sizeof(*hw->blk[block_id].xlt2.t);
@@ -2753,7 +2753,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_PROFID_TCAM_RSS:
case ICE_SID_PROFID_TCAM_ACL:
case ICE_SID_PROFID_TCAM_PE:
- pid = (struct ice_prof_id_section *)sect;
+ pid = sect;
src = (u8 *)pid->entry;
sect_len = le16_to_cpu(pid->count) *
sizeof(*hw->blk[block_id].prof.t);
@@ -2766,7 +2766,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_PROFID_REDIR_RSS:
case ICE_SID_PROFID_REDIR_ACL:
case ICE_SID_PROFID_REDIR_PE:
- pr = (struct ice_prof_redir_section *)sect;
+ pr = sect;
src = pr->redir_value;
sect_len = le16_to_cpu(pr->count) *
sizeof(*hw->blk[block_id].prof_redir.t);
@@ -2779,7 +2779,7 @@ static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
case ICE_SID_FLD_VEC_RSS:
case ICE_SID_FLD_VEC_ACL:
case ICE_SID_FLD_VEC_PE:
- es = (struct ice_sw_fv_section *)sect;
+ es = sect;
src = (u8 *)es->fv;
sect_len = (u32)(le16_to_cpu(es->count) *
hw->blk[block_id].es.fvw) *
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 8f81b95e679c..dcec0360ce55 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -417,6 +417,11 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
return err;
}
+/* Length in seconds to wait before timing out when erasing a flash module.
+ * Yes, erasing really can take minutes to complete.
+ */
+#define ICE_FW_ERASE_TIMEOUT 300
+
/**
* ice_erase_nvm_module - Erase an NVM module and await firmware completion
* @pf: the PF data structure
@@ -449,7 +454,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
devlink = priv_to_devlink(pf);
- devlink_flash_update_status_notify(devlink, "Erasing", component, 0, 0);
+ devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
status = ice_aq_erase_nvm(hw, module, NULL);
if (status) {
@@ -461,8 +466,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
- /* Yes, this really can take minutes to complete */
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, 300 * HZ, &event);
+ err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event);
if (err) {
dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
component, module, err);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 90abc8612a6a..093a1818a392 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -86,6 +86,9 @@
#define QRXFLXP_CNTXT_RXDID_PRIO_S 8
#define QRXFLXP_CNTXT_RXDID_PRIO_M ICE_M(0x7, 8)
#define QRXFLXP_CNTXT_TS_M BIT(11)
+#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S 4
+#define GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M ICE_M(0x3, 4)
+#define GLGEN_CLKSTAT_SRC 0x000B826C
#define GLGEN_RSTAT 0x000B8188
#define GLGEN_RSTAT_DEVSTATE_M ICE_M(0x3, 0)
#define GLGEN_RSTCTL 0x000B8180
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
new file mode 100644
index 000000000000..4599fc3b4ed8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+/* Link Aggregation code */
+
+#include "ice.h"
+#include "ice_lag.h"
+
+/**
+ * ice_lag_nop_handler - no-op Rx handler to disable LAG
+ * @pskb: pointer to skb pointer
+ */
+rx_handler_result_t ice_lag_nop_handler(struct sk_buff __always_unused **pskb)
+{
+ return RX_HANDLER_PASS;
+}
+
+/**
+ * ice_lag_set_primary - set PF LAG state as Primary
+ * @lag: LAG info struct
+ */
+static void ice_lag_set_primary(struct ice_lag *lag)
+{
+ struct ice_pf *pf = lag->pf;
+
+ if (!pf)
+ return;
+
+ if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_BACKUP) {
+ dev_warn(ice_pf_to_dev(pf), "%s: Attempt to be Primary, but incompatible state.\n",
+ netdev_name(lag->netdev));
+ return;
+ }
+
+ lag->role = ICE_LAG_PRIMARY;
+}
+
+/**
+ * ice_lag_set_backup - set PF LAG state to Backup
+ * @lag: LAG info struct
+ */
+static void ice_lag_set_backup(struct ice_lag *lag)
+{
+ struct ice_pf *pf = lag->pf;
+
+ if (!pf)
+ return;
+
+ if (lag->role != ICE_LAG_UNSET && lag->role != ICE_LAG_PRIMARY) {
+ dev_dbg(ice_pf_to_dev(pf), "%s: Attempt to be Backup, but incompatible state\n",
+ netdev_name(lag->netdev));
+ return;
+ }
+
+ lag->role = ICE_LAG_BACKUP;
+}
+
+/**
+ * ice_display_lag_info - print LAG info
+ * @lag: LAG info struct
+ */
+static void ice_display_lag_info(struct ice_lag *lag)
+{
+ const char *name, *peer, *upper, *role, *bonded, *master;
+ struct device *dev = &lag->pf->pdev->dev;
+
+ name = lag->netdev ? netdev_name(lag->netdev) : "unset";
+ peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
+ upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
+ master = lag->master ? "TRUE" : "FALSE";
+ bonded = lag->bonded ? "BONDED" : "UNBONDED";
+
+ switch (lag->role) {
+ case ICE_LAG_NONE:
+ role = "NONE";
+ break;
+ case ICE_LAG_PRIMARY:
+ role = "PRIMARY";
+ break;
+ case ICE_LAG_BACKUP:
+ role = "BACKUP";
+ break;
+ case ICE_LAG_UNSET:
+ role = "UNSET";
+ break;
+ default:
+ role = "ERROR";
+ }
+
+ dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
+ bonded, peer, upper, role, master);
+}
+
+/**
+ * ice_lag_info_event - handle NETDEV_BONDING_INFO event
+ * @lag: LAG info struct
+ * @ptr: opaque data pointer
+ *
+ * ptr is to be cast to (netdev_notifier_bonding_info *)
+ */
+static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *event_netdev, *netdev_tmp;
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ const char *lag_netdev_name;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ info = ptr;
+ lag_netdev_name = netdev_name(lag->netdev);
+ bonding_info = &info->bonding_info;
+
+ if (event_netdev != lag->netdev || !lag->bonded || !lag->upper_netdev)
+ return;
+
+ if (bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) {
+ netdev_dbg(lag->netdev, "Bonding event recv, but mode not active/backup\n");
+ goto lag_out;
+ }
+
+ if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
+ netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+ goto lag_out;
+ }
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
+ if (!netif_is_ice(netdev_tmp))
+ continue;
+
+ if (netdev_tmp && netdev_tmp != lag->netdev &&
+ lag->peer_netdev != netdev_tmp) {
+ dev_hold(netdev_tmp);
+ lag->peer_netdev = netdev_tmp;
+ }
+ }
+ rcu_read_unlock();
+
+ if (bonding_info->slave.state)
+ ice_lag_set_backup(lag);
+ else
+ ice_lag_set_primary(lag);
+
+lag_out:
+ ice_display_lag_info(lag);
+}
+
+/**
+ * ice_lag_link - handle LAG link event
+ * @lag: LAG info struct
+ * @info: info from the netdev notifier
+ */
+static void
+ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *netdev_tmp, *upper = info->upper_dev;
+ struct ice_pf *pf = lag->pf;
+ int peers = 0;
+
+ if (lag->bonded)
+ dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
+ netdev_name(lag->netdev));
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper, netdev_tmp)
+ peers++;
+ rcu_read_unlock();
+
+ if (lag->upper_netdev != upper) {
+ dev_hold(upper);
+ lag->upper_netdev = upper;
+ }
+
+ ice_clear_sriov_cap(pf);
+
+ lag->bonded = true;
+ lag->role = ICE_LAG_UNSET;
+
+	/* if this is the first element in a LAG, mark as master */
+ lag->master = !!(peers == 1);
+}
+
+/**
+ * ice_lag_unlink - handle unlink event
+ * @lag: LAG info struct
+ * @info: info from netdev notification
+ */
+static void
+ice_lag_unlink(struct ice_lag *lag,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *netdev_tmp, *upper = info->upper_dev;
+ struct ice_pf *pf = lag->pf;
+ bool found = false;
+
+ if (!lag->bonded) {
+ netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
+ return;
+ }
+
+ /* determine if we are in the new LAG config or not */
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
+ if (netdev_tmp == lag->netdev) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (found)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ }
+
+ if (lag->peer_netdev) {
+ dev_put(lag->peer_netdev);
+ lag->peer_netdev = NULL;
+ }
+
+ ice_set_sriov_cap(pf);
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
+/**
+ * ice_lag_changeupper_event - handle LAG changeupper event
+ * @lag: LAG info struct
+ * @ptr: opaque pointer data
+ *
+ * ptr is to be cast into netdev_notifier_changeupper_info
+ */
+static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *netdev;
+
+ info = ptr;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ /* not for this netdev */
+ if (netdev != lag->netdev)
+ return;
+
+ if (!info->upper_dev) {
+ netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
+ return;
+ }
+
+ netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
+
+ if (!netif_is_lag_master(info->upper_dev)) {
+ netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+ return;
+ }
+
+ if (info->linking)
+ ice_lag_link(lag, info);
+ else
+ ice_lag_unlink(lag, info);
+
+ ice_display_lag_info(lag);
+}
+
+/**
+ * ice_lag_changelower_event - handle LAG changelower event
+ * @lag: LAG info struct
+ * @ptr: opaque data pointer
+ *
+ * ptr to be cast to netdev_notifier_changelowerstate_info
+ */
+static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev)
+ return;
+
+ netdev_dbg(netdev, "bonding info\n");
+
+ if (!netif_is_lag_port(netdev))
+ netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
+}
+
+/**
+ * ice_lag_event_handler - handle LAG events from netdev
+ * @notif_blk: notifier block registered by this netdev
+ * @event: event type
+ * @ptr: opaque data containing notifier event
+ */
+static int
+ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct ice_lag *lag;
+
+ lag = container_of(notif_blk, struct ice_lag, notif_block);
+
+ if (!lag->netdev)
+ return NOTIFY_DONE;
+
+ /* Check that the netdev is in the working namespace */
+ if (!net_eq(dev_net(netdev), &init_net))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ ice_lag_changeupper_event(lag, ptr);
+ break;
+ case NETDEV_CHANGELOWERSTATE:
+ ice_lag_changelower_event(lag, ptr);
+ break;
+ case NETDEV_BONDING_INFO:
+ ice_lag_info_event(lag, ptr);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+/**
+ * ice_register_lag_handler - register LAG handler on netdev
+ * @lag: LAG struct
+ */
+static int ice_register_lag_handler(struct ice_lag *lag)
+{
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct notifier_block *notif_blk;
+
+ notif_blk = &lag->notif_block;
+
+ if (!notif_blk->notifier_call) {
+ notif_blk->notifier_call = ice_lag_event_handler;
+ if (register_netdevice_notifier(notif_blk)) {
+ notif_blk->notifier_call = NULL;
+ dev_err(dev, "FAIL register LAG event handler!\n");
+ return -EINVAL;
+ }
+ dev_dbg(dev, "LAG event handler registered\n");
+ }
+ return 0;
+}
+
+/**
+ * ice_unregister_lag_handler - unregister LAG handler on netdev
+ * @lag: LAG struct
+ */
+static void ice_unregister_lag_handler(struct ice_lag *lag)
+{
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct notifier_block *notif_blk;
+
+ notif_blk = &lag->notif_block;
+ if (notif_blk->notifier_call) {
+ unregister_netdevice_notifier(notif_blk);
+ dev_dbg(dev, "LAG event handler unregistered\n");
+ }
+}
+
+/**
+ * ice_init_lag - initialize support for LAG
+ * @pf: PF struct
+ *
+ * Alloc memory for LAG structs and initialize the elements.
+ * Memory will be freed in ice_deinit_lag
+ */
+int ice_init_lag(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_lag *lag;
+ struct ice_vsi *vsi;
+ int err;
+
+ pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
+ if (!pf->lag)
+ return -ENOMEM;
+ lag = pf->lag;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi) {
+ dev_err(dev, "couldn't get main vsi, link aggregation init fail\n");
+ err = -EIO;
+ goto lag_error;
+ }
+
+ lag->pf = pf;
+ lag->netdev = vsi->netdev;
+ lag->role = ICE_LAG_NONE;
+ lag->bonded = false;
+ lag->peer_netdev = NULL;
+ lag->upper_netdev = NULL;
+ lag->notif_block.notifier_call = NULL;
+
+ err = ice_register_lag_handler(lag);
+ if (err) {
+ dev_warn(dev, "INIT LAG: Failed to register event handler\n");
+ goto lag_error;
+ }
+
+ ice_display_lag_info(lag);
+
+ dev_dbg(dev, "INIT LAG complete\n");
+ return 0;
+
+lag_error:
+ kfree(lag);
+ pf->lag = NULL;
+ return err;
+}
+
+/**
+ * ice_deinit_lag - Clean up LAG
+ * @pf: PF struct
+ *
+ * Clean up kernel LAG info and free memory
+ * This function is meant to only be called on driver remove/shutdown
+ */
+void ice_deinit_lag(struct ice_pf *pf)
+{
+ struct ice_lag *lag;
+
+ lag = pf->lag;
+
+ if (!lag)
+ return;
+
+ if (lag->pf)
+ ice_unregister_lag_handler(lag);
+
+ if (lag->upper_netdev)
+ dev_put(lag->upper_netdev);
+
+ if (lag->peer_netdev)
+ dev_put(lag->peer_netdev);
+
+ kfree(lag);
+
+ pf->lag = NULL;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
new file mode 100644
index 000000000000..c2e3688dd8fd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018-2021, Intel Corporation. */
+
+#ifndef _ICE_LAG_H_
+#define _ICE_LAG_H_
+
+#include <linux/netdevice.h>
+
+/* LAG roles for netdev */
+enum ice_lag_role {
+ ICE_LAG_NONE,
+ ICE_LAG_PRIMARY,
+ ICE_LAG_BACKUP,
+ ICE_LAG_UNSET
+};
+
+struct ice_pf;
+
+/* LAG info struct */
+struct ice_lag {
+ struct ice_pf *pf; /* backlink to PF struct */
+ struct net_device *netdev; /* this PF's netdev */
+ struct net_device *peer_netdev;
+ struct net_device *upper_netdev; /* upper bonding netdev */
+ struct notifier_block notif_block;
+ u8 bonded:1; /* currently bonded */
+ u8 master:1; /* this is a master */
+ u8 handler:1; /* did we register a rx_netdev_handler */
+ /* each thing blocking bonding will increment this value by one.
+ * If this value is zero, then bonding is allowed.
+ */
+ u16 dis_lag;
+ u8 role;
+};
+
+int ice_init_lag(struct ice_pf *pf);
+void ice_deinit_lag(struct ice_pf *pf);
+rx_handler_result_t ice_lag_nop_handler(struct sk_buff **pskb);
+
+/**
+ * ice_disable_lag - increment LAG disable count
+ * @lag: LAG struct
+ */
+static inline void ice_disable_lag(struct ice_lag *lag)
+{
+	/* If LAG on this PF is not already disabled, disable it */
+ rtnl_lock();
+ if (!netdev_is_rx_handler_busy(lag->netdev)) {
+ if (!netdev_rx_handler_register(lag->netdev,
+ ice_lag_nop_handler,
+ NULL))
+ lag->handler = true;
+ }
+ rtnl_unlock();
+ lag->dis_lag++;
+}
+
+/**
+ * ice_enable_lag - decrement disable count for a PF
+ * @lag: LAG struct
+ *
+ * Decrement the disable counter for a port, and if that count reaches
+ * zero, then remove the no-op Rx handler from that netdev
+ */
+static inline void ice_enable_lag(struct ice_lag *lag)
+{
+ if (lag->dis_lag)
+ lag->dis_lag--;
+ if (!lag->dis_lag && lag->handler) {
+ rtnl_lock();
+ netdev_rx_handler_unregister(lag->netdev);
+ rtnl_unlock();
+ lag->handler = false;
+ }
+}
+
+/**
+ * ice_is_lag_dis - is LAG disabled
+ * @lag: LAG struct
+ *
+ * Return true if bonding is disabled
+ */
+static inline bool ice_is_lag_dis(struct ice_lag *lag)
+{
+ return !!(lag->dis_lag);
+}
+#endif /* _ICE_LAG_H_ */
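
The dis_lag field behaves like a reference count around the no-op Rx handler. A hedged sketch of a caller that must not run while the port is bonded (for instance the SR-IOV paths hinted at by the ice_clear_sriov_cap()/ice_set_sriov_cap() calls in ice_lag.c) could look like the following; the function name and the -EBUSY policy are assumptions, not taken from the patch.

/* hypothetical caller sketch, not part of this patch */
static int example_start_conflicting_op(struct ice_pf *pf)
{
	struct ice_lag *lag = pf->lag;
	int err = 0;

	if (!lag)
		return 0;	/* LAG support was never initialized */

	if (lag->bonded)
		return -EBUSY;	/* already bonded; refuse (assumed policy) */

	/* block bonding attempts while the conflicting work runs */
	ice_disable_lag(lag);

	/* ... perform the work that conflicts with bonding here ... */

	ice_enable_lag(lag);
	return err;
}
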
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ad9c22a1b97a..8d4e2ad4328d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2078,7 +2078,7 @@ err_out:
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
- struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
@@ -2145,11 +2145,18 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
dev = ice_pf_to_dev(pf);
eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
- if (tx)
+ if (tx) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
- else
- status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI);
+ } else {
+ if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
+ create);
+ } else {
+ status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
+ ICE_FWD_TO_VSI);
+ }
+ }
if (status)
dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
@@ -2158,6 +2165,126 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
/**
+ * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
+ * @vsi: pointer to the VSI
+ *
+ * This function will allocate a new scheduler aggregator node if needed and
+ * will move the specified VSI into it.
+ */
+static void ice_set_agg_vsi(struct ice_vsi *vsi)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_agg_node *agg_node_iter = NULL;
+ u32 agg_id = ICE_INVALID_AGG_NODE_ID;
+ struct ice_agg_node *agg_node = NULL;
+ int node_offset, max_agg_nodes = 0;
+ struct ice_port_info *port_info;
+ struct ice_pf *pf = vsi->back;
+ u32 agg_node_id_start = 0;
+ enum ice_status status;
+
+	/* create (as needed) scheduler aggregator node and move VSI into
+	 * the corresponding aggregator node
+	 * - PF aggregator node contains VSIs of type _PF and _CTRL
+	 * - VF aggregator nodes will contain VF VSIs
+	 */
+ port_info = pf->hw.port_info;
+ if (!port_info)
+ return;
+
+ switch (vsi->type) {
+ case ICE_VSI_CTRL:
+ case ICE_VSI_LB:
+ case ICE_VSI_PF:
+ max_agg_nodes = ICE_MAX_PF_AGG_NODES;
+ agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
+ agg_node_iter = &pf->pf_agg_node[0];
+ break;
+ case ICE_VSI_VF:
+		/* a user can create 'n' VFs on a given PF, but an aggregator
+		 * node can have at most 64 children. The following code
+		 * handles aggregator(s) for VF VSIs: it either selects an
+		 * agg_node that was already created (provided num_vsis < 64),
+		 * or it selects the next available node, which will then be
+		 * created
+		 */
+ max_agg_nodes = ICE_MAX_VF_AGG_NODES;
+ agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
+ agg_node_iter = &pf->vf_agg_node[0];
+ break;
+ default:
+ /* other VSI type, handle later if needed */
+ dev_dbg(dev, "unexpected VSI type %s\n",
+ ice_vsi_type_str(vsi->type));
+ return;
+ }
+
+ /* find the appropriate aggregator node */
+ for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
+ /* see if we can find space in previously created
+ * node if num_vsis < 64, otherwise skip
+ */
+ if (agg_node_iter->num_vsis &&
+ agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ agg_node_iter++;
+ continue;
+ }
+
+ if (agg_node_iter->valid &&
+ agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
+ agg_id = agg_node_iter->agg_id;
+ agg_node = agg_node_iter;
+ break;
+ }
+
+ /* find unclaimed agg_id */
+ if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
+ agg_id = node_offset + agg_node_id_start;
+ agg_node = agg_node_iter;
+ break;
+ }
+ /* move to next agg_node */
+ agg_node_iter++;
+ }
+
+ if (!agg_node)
+ return;
+
+ /* if selected aggregator node was not created, create it */
+ if (!agg_node->valid) {
+ status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
+ (u8)vsi->tc_cfg.ena_tc);
+ if (status) {
+ dev_err(dev, "unable to create aggregator node with agg_id %u\n",
+ agg_id);
+ return;
+ }
+		/* aggregator node is created, store the needed info */
+ agg_node->valid = true;
+ agg_node->agg_id = agg_id;
+ }
+
+ /* move VSI to corresponding aggregator node */
+ status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
+ (u8)vsi->tc_cfg.ena_tc);
+ if (status) {
+ dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, agg_id);
+ return;
+ }
+
+ /* keep active children count for aggregator node */
+ agg_node->num_vsis++;
+
+ /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
+ * to aggregator node
+ */
+ vsi->agg_node = agg_node;
+	dev_dbg(dev, "successfully moved VSI idx %u (tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
+ vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
+ vsi->agg_node->num_vsis);
+}
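
The selection loop above reduces to: reuse a valid node that still has room, otherwise claim the first unclaimed ID in the range reserved for this VSI type. A simplified, self-contained model of that walk is sketched below; the ex_* struct, constants, and function are stand-ins for illustration only, not the driver's types.

/* Simplified, illustrative model of the selection walk above; the ex_*
 * names and constants are stand-ins, not the driver's types.
 */
#include <stdbool.h>
#include <stddef.h>

#define EX_INVALID_AGG_ID	0xFFFFFFFFu
#define EX_MAX_VSIS_PER_NODE	64

struct ex_agg_node {
	unsigned int agg_id;
	unsigned int num_vsis;
	bool valid;
};

/* Return the node to use and set *agg_id to the reused ID, or to the
 * next unclaimed ID in [id_start, id_start + n); NULL if all are full.
 */
static struct ex_agg_node *
ex_pick_agg_node(struct ex_agg_node *nodes, int n, unsigned int id_start,
		 unsigned int *agg_id)
{
	int i;

	for (i = 0; i < n; i++) {
		struct ex_agg_node *node = &nodes[i];

		/* skip nodes that are already at capacity */
		if (node->num_vsis == EX_MAX_VSIS_PER_NODE)
			continue;

		/* prefer an existing node that still has room */
		if (node->valid && node->agg_id != EX_INVALID_AGG_ID) {
			*agg_id = node->agg_id;
			return node;
		}

		/* otherwise claim the first unused slot */
		if (node->agg_id == EX_INVALID_AGG_ID) {
			*agg_id = id_start + i;
			return node;
		}
	}

	return NULL;
}
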
+
+/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -2327,6 +2454,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_cfg_sw_lldp(vsi, true, true);
}
+ if (!vsi->agg_node)
+ ice_set_agg_vsi(vsi);
return vsi;
unroll_clear_rings:
@@ -2342,6 +2471,8 @@ unroll_vsi_init:
unroll_get_qs:
ice_vsi_put_qs(vsi);
unroll_vsi_alloc:
+ if (vsi_type == ICE_VSI_VF)
+ ice_enable_lag(pf->lag);
ice_vsi_clear(vsi);
return NULL;
@@ -2669,6 +2800,9 @@ int ice_vsi_release(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
+ if (vsi->type == ICE_VSI_VF &&
+ vsi->agg_node && vsi->agg_node->valid)
+ vsi->agg_node->num_vsis--;
ice_vsi_clear_rings(vsi);
ice_vsi_put_qs(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index e10ca8929f85..2c23c8f468a5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -44,6 +44,11 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
+bool netif_is_ice(struct net_device *dev)
+{
+ return dev && (dev->netdev_ops == &ice_netdev_ops);
+}
+
/**
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
@@ -430,11 +435,19 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
*/
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
{
+ int node;
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v], locked);
+
+ for (node = 0; node < ICE_MAX_PF_AGG_NODES; node++)
+ pf->pf_agg_node[node].num_vsis = 0;
+
+ for (node = 0; node < ICE_MAX_VF_AGG_NODES; node++)
+ pf->vf_agg_node[node].num_vsis = 0;
+
}
/**
@@ -785,15 +798,9 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
u8 mib_type, *buf, *lldpmib = NULL;
u16 len, typelen, offset = 0;
struct ice_lldp_org_tlv *tlv;
- struct ice_hw *hw;
+ struct ice_hw *hw = &pf->hw;
u32 ouisubtype;
- if (!pf) {
- dev_dbg(dev, "%s NULL pf pointer\n", __func__);
- return;
- }
-
- hw = &pf->hw;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib) {
@@ -2482,6 +2489,22 @@ free_qmap:
}
/**
+ * ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI
+ * @vsi: VSI to schedule napi on
+ */
+static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
+{
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_ring *rx_ring = vsi->rx_rings[i];
+
+ if (rx_ring->xsk_pool)
+ napi_schedule(&rx_ring->q_vector->napi);
+ }
+}
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2525,16 +2548,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running)
ret = ice_up(vsi);
- if (!ret && prog && vsi->xsk_pools) {
- int i;
-
- ice_for_each_rxq(vsi, i) {
- struct ice_ring *rx_ring = vsi->rx_rings[i];
-
- if (rx_ring->xsk_pool)
- napi_schedule(&rx_ring->q_vector->napi);
- }
- }
+ if (!ret && prog)
+ ice_vsi_rx_napi_schedule(vsi);
return (ret || xdp_ring_err) ? -ENOMEM : 0;
}
@@ -3376,28 +3391,20 @@ static int ice_init_pf(struct ice_pf *pf)
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
+ int v_left, v_actual, v_other, v_budget = 0;
struct device *dev = ice_pf_to_dev(pf);
- int v_left, v_actual, v_budget = 0;
int needed, err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
- /* reserve one vector for miscellaneous handler */
- needed = 1;
+ /* reserve for LAN miscellaneous handler */
+ needed = ICE_MIN_LAN_OICR_MSIX;
if (v_left < needed)
goto no_hw_vecs_left_err;
v_budget += needed;
v_left -= needed;
- /* reserve vectors for LAN traffic */
- needed = min_t(int, num_online_cpus(), v_left);
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_lan_msix = needed;
- v_budget += needed;
- v_left -= needed;
-
- /* reserve one vector for flow director */
+ /* reserve for flow director */
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
needed = ICE_FDIR_MSIX;
if (v_left < needed)
@@ -3406,9 +3413,19 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
}
+ /* total used for non-traffic vectors */
+ v_other = v_budget;
+
+ /* reserve vectors for LAN traffic */
+ needed = min_t(int, num_online_cpus(), v_left);
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ pf->num_lan_msix = needed;
+ v_budget += needed;
+ v_left -= needed;
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
-
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
@@ -3420,7 +3437,6 @@ static int ice_ena_msix_range(struct ice_pf *pf)
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
ICE_MIN_MSIX, v_budget);
-
if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
@@ -3437,7 +3453,16 @@ static int ice_ena_msix_range(struct ice_pf *pf)
err = -ERANGE;
goto msix_err;
} else {
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ int v_traffic = v_actual - v_other;
+
+ if (v_actual == ICE_MIN_MSIX ||
+ v_traffic < ICE_MIN_LAN_TXRX_MSIX)
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ else
+ pf->num_lan_msix = v_traffic;
+
+ dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
}
}
@@ -3495,9 +3520,9 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
return vectors;
/* set up vector assignment tracking */
- pf->irq_tracker =
- devm_kzalloc(ice_pf_to_dev(pf), sizeof(*pf->irq_tracker) +
- (sizeof(u16) * vectors), GFP_KERNEL);
+ pf->irq_tracker = devm_kzalloc(ice_pf_to_dev(pf),
+ struct_size(pf->irq_tracker, list, vectors),
+ GFP_KERNEL);
if (!pf->irq_tracker) {
ice_dis_msix(pf);
return -ENOMEM;
@@ -4233,6 +4258,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_cfg_lldp_mib_change(&pf->hw, true);
}
+ if (ice_init_lag(pf))
+ dev_warn(dev, "Failed to init link aggregation support\n");
+
/* print PCI link speed and width */
pcie_print_link_status(pf->pdev);
@@ -4355,6 +4383,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_aq_cancel_waiting_tasks(pf);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+ ice_deinit_lag(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
@@ -6125,15 +6154,6 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
- if (new_mtu < (int)netdev->min_mtu) {
- netdev_err(netdev, "new MTU invalid. min_mtu is %d\n",
- netdev->min_mtu);
- return -EINVAL;
- } else if (new_mtu > (int)netdev->max_mtu) {
- netdev_err(netdev, "new MTU invalid. max_mtu is %d\n",
- netdev->min_mtu);
- return -EINVAL;
- }
/* if a reset is in progress, wait for some time for it to complete */
do {
if (ice_is_reset_in_progress(pf->state)) {
@@ -6158,7 +6178,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
err = ice_down(vsi);
if (err) {
- netdev_err(netdev, "change MTU if_up err %d\n", err);
+ netdev_err(netdev, "change MTU if_down err %d\n", err);
return err;
}
@@ -6250,6 +6270,8 @@ const char *ice_stat_str(enum ice_status stat_err)
return "ICE_ERR_OUT_OF_RANGE";
case ICE_ERR_ALREADY_EXISTS:
return "ICE_ERR_ALREADY_EXISTS";
+ case ICE_ERR_NVM:
+ return "ICE_ERR_NVM";
case ICE_ERR_NVM_CHECKSUM:
return "ICE_ERR_NVM_CHECKSUM";
case ICE_ERR_BUF_TOO_SHORT:
@@ -6792,6 +6814,4 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index f729cd0c6224..75ccbfc07f99 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -72,7 +72,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*length = 0;
/* Verify the length of the read if this is for the Shadow RAM */
- if (read_shadow_ram && ((offset + inlen) > (hw->nvm.sr_words * 2u))) {
+ if (read_shadow_ram && ((offset + inlen) > (hw->flash.sr_words * 2u))) {
ice_debug(hw, ICE_DBG_NVM, "NVM error: requested offset is beyond Shadow RAM limit\n");
return ICE_ERR_PARAM;
}
@@ -213,7 +213,7 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
- if (hw->nvm.blank_nvm_mode)
+ if (hw->flash.blank_nvm_mode)
return 0;
return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
@@ -227,13 +227,186 @@ ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
*/
void ice_release_nvm(struct ice_hw *hw)
{
- if (hw->nvm.blank_nvm_mode)
+ if (hw->flash.blank_nvm_mode)
return;
ice_release_res(hw, ICE_NVM_RES_ID);
}
/**
+ * ice_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, lookup the module offset from the beginning of the
+ * flash.
+ *
+ * Returns the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select bank, u16 module)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ enum ice_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case ICE_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case ICE_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case ICE_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash module: 0x%04x\n", module);
+ return 0;
+ }
+
+ switch (active_bank) {
+ case ICE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case ICE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_NVM, "Unexpected value for active flash bank: %u\n",
+ active_bank);
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case ICE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case ICE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ ice_debug(hw, ICE_DBG_NVM, "Unexpected value for flash bank selection: %u\n", bank);
+ return 0;
+}
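
As a worked illustration of the offset arithmetic above (standalone, not driver code): the second bank sits immediately after the first, so the requested bank starts either at the module pointer or at the pointer plus the bank size, depending on which bank is currently active.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* With ptr = 0x10000, size = 0x80000 and the 2nd bank active:
 *   active   = 0x10000 + 0x80000 = 0x90000
 *   inactive = 0x10000
 * If the 1st bank were active, the two results swap.
 */
static uint32_t bank_offset(uint32_t ptr, uint32_t size,
                            bool second_bank_active, bool want_active)
{
        bool use_second = second_bank_active == want_active;

        return ptr + (use_second ? size : 0);
}

int main(void)
{
        printf("active=0x%x inactive=0x%x\n",
               bank_offset(0x10000, 0x80000, true, true),
               bank_offset(0x10000, 0x80000, true, false));
        return 0;
}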
+
+/**
+ * ice_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the data read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active bank or the inactive bank of that
+ * module.
+ *
+ * The data will be read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ice_determine_active_flash_banks()
+ * during initialization.
+ */
+static enum ice_status
+ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
+ u32 offset, u8 *data, u32 length)
+{
+ enum ice_status status;
+ u32 start;
+
+ start = ice_get_flash_bank_offset(hw, bank, module);
+ if (!start) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to calculate flash bank offset for module 0x%04x\n",
+ module);
+ return ICE_ERR_PARAM;
+ }
+
+ status = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (status)
+ return status;
+
+ status = ice_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ice_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ice_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ */
+static enum ice_status
+ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
+{
+ enum ice_status status;
+ __le16 data_local;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
+ (__force u8 *)&data_local, sizeof(u16));
+ if (!status)
+ *data = le16_to_cpu(data_local);
+
+ return status;
+}
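
The read above fills a little-endian 16-bit word into a byte buffer and then converts it to host order. A standalone sketch of that conversion (the kernel uses le16_to_cpu(); the helper below is only an illustration):

#include <stdint.h>
#include <stdio.h>

/* Assemble a host-order 16-bit value from two little-endian bytes. */
static uint16_t le16_to_host(const uint8_t *buf)
{
        return (uint16_t)buf[0] | ((uint16_t)buf[1] << 8);
}

int main(void)
{
        uint8_t raw[2] = { 0x34, 0x12 };        /* little-endian 0x1234 */

        printf("0x%04x\n", le16_to_host(raw));  /* prints 0x1234 */
        return 0;
}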
+
+/**
+ * ice_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ */
+static enum ice_status
+ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
+{
+ return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+}
+
+/**
+ * ice_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ */
+static enum ice_status
+ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
+{
+ enum ice_status status;
+ __le16 data_local;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
+ (__force u8 *)&data_local, sizeof(u16));
+ if (!status)
+ *data = le16_to_cpu(data_local);
+
+ return status;
+}
+
+/**
* ice_read_sr_word - Reads Shadow RAM word and acquire NVM if necessary
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
@@ -380,138 +553,246 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
}
/**
- * ice_get_orom_ver_info - Read Option ROM version information
+ * ice_get_nvm_ver_info - Read NVM version information
* @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
*
- * Read the Combo Image version data from the Boot Configuration TLV and fill
- * in the option ROM version data.
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the NVM info structure.
*/
-static enum ice_status ice_get_orom_ver_info(struct ice_hw *hw)
+static enum ice_status
+ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
- u16 combo_hi, combo_lo, boot_cfg_tlv, boot_cfg_tlv_len;
- struct ice_orom_info *orom = &hw->nvm.orom;
+ u16 eetrack_lo, eetrack_hi, ver;
enum ice_status status;
- u32 combo_ver;
- status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
- ICE_SR_BOOT_CFG_PTR);
+ status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read Boot Configuration Block TLV.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read DEV starter version.\n");
return status;
}
- /* Boot Configuration Block must have length at least 2 words
- * (Combo Image Version High and Combo Image Version Low)
- */
- if (boot_cfg_tlv_len < 2) {
- ice_debug(hw, ICE_DBG_INIT, "Invalid Boot Configuration Block TLV size.\n");
- return ICE_ERR_INVAL_SIZE;
- }
+ nvm->major = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF),
- &combo_hi);
+ status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK lo.\n");
+ return status;
+ }
+ status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER hi.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read EETRACK hi.\n");
return status;
}
- status = ice_read_sr_word(hw, (boot_cfg_tlv + ICE_NVM_OROM_VER_OFF + 1),
- &combo_lo);
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
+ return 0;
+}
+
+/**
+ * ice_get_inactive_nvm_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for Option ROM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ */
+enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+{
+ return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ice_get_orom_civd_data - Get the combo version information from Option ROM
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @civd: storage for the Option ROM CIVD data.
+ *
+ * Searches through the Option ROM flash contents to locate the CIVD data for
+ * the image.
+ */
+static enum ice_status
+ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
+ struct ice_orom_civd_info *civd)
+{
+ struct ice_orom_civd_info tmp;
+ enum ice_status status;
+ u32 offset;
+
+ /* The CIVD section is located in the Option ROM aligned to 512 bytes.
+ * The first 4 bytes must contain the ASCII characters "$CIV".
+ * A simple modulo 256 sum of all of the bytes of the structure must
+ * equal 0.
+ */
+ for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
+ u8 sum = 0, i;
+
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
+ offset, (u8 *)&tmp, sizeof(tmp));
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM CIVD data\n");
+ return status;
+ }
+
+ /* Skip forward until we find a matching signature */
+ if (memcmp("$CIV", tmp.signature, sizeof(tmp.signature)) != 0)
+ continue;
+
+ /* Verify that the simple checksum is zero */
+ for (i = 0; i < sizeof(tmp); i++)
+ sum += ((u8 *)&tmp)[i];
+
+ if (sum) {
+ ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
+ sum);
+ return ICE_ERR_NVM;
+ }
+
+ *civd = tmp;
+ return 0;
+ }
+
+ return ICE_ERR_NVM;
+}
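
A standalone sketch of the candidate test used in the scan above: the block must start with the ASCII signature "$CIV" and the byte-wise sum of the whole structure must be zero modulo 256. The function name and buffer handling below are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool civd_candidate_ok(const uint8_t *buf, size_t len)
{
        uint8_t sum = 0;
        size_t i;

        if (len < 4 || memcmp(buf, "$CIV", 4) != 0)
                return false;                   /* wrong signature */
        for (i = 0; i < len; i++)
                sum += buf[i];                  /* modulo 256 by uint8_t wrap */
        return sum == 0;
}

int main(void)
{
        /* '$'+'C'+'I'+'V' = 0x106, so a checksum byte of 0xFA makes the sum 0 */
        uint8_t blk[74] = { '$', 'C', 'I', 'V', 0xFA };

        printf("%s\n", civd_candidate_ok(blk, sizeof(blk)) ? "valid" : "invalid");
        return 0;
}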
+
+/**
+ * ice_get_orom_ver_info - Read Option ROM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash module
+ * @orom: pointer to Option ROM info structure
+ *
+ * Read Option ROM version and security revision from the Option ROM flash
+ * section.
+ */
+static enum ice_status
+ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
+{
+ struct ice_orom_civd_info civd;
+ enum ice_status status;
+ u32 combo_ver;
+
+ status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read OROM_VER lo.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to locate valid Option ROM CIVD data\n");
return status;
}
- combo_ver = ((u32)combo_hi << 16) | combo_lo;
+ combo_ver = le32_to_cpu(civd.combo_ver);
- orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >>
- ICE_OROM_VER_SHIFT);
+ orom->major = (u8)((combo_ver & ICE_OROM_VER_MASK) >> ICE_OROM_VER_SHIFT);
orom->patch = (u8)(combo_ver & ICE_OROM_VER_PATCH_MASK);
- orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >>
- ICE_OROM_VER_BUILD_SHIFT);
+ orom->build = (u16)((combo_ver & ICE_OROM_VER_BUILD_MASK) >> ICE_OROM_VER_BUILD_SHIFT);
return 0;
}
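
The combo_ver decode above packs a 32-bit value into an 8-bit major, 16-bit build and 8-bit patch. The exact mask and shift values come from the ICE_OROM_VER_* macros, which are not shown in this hunk; the sketch below assumes the layout major[31:24], build[23:8], patch[7:0] purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration: major[31:24] build[23:8] patch[7:0]. */
static void decode_combo_ver(uint32_t combo_ver)
{
        uint8_t  major = (combo_ver >> 24) & 0xff;
        uint16_t build = (combo_ver >> 8) & 0xffff;
        uint8_t  patch = combo_ver & 0xff;

        printf("%u.%u.%u\n", major, build, patch);
}

int main(void)
{
        decode_combo_ver(0x01234502);   /* prints 1.9029.2 */
        return 0;
}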
/**
- * ice_get_netlist_ver_info
+ * ice_get_inactive_orom_ver - Read Option ROM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @orom: storage for Option ROM version information
+ *
+ * Reads the Option ROM version and security revision data for the inactive
+ * section of flash. Used to access version data for a pending update that has
+ * not yet been activated.
+ */
+enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+{
+ return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
+}
+
+/**
+ * ice_get_netlist_info
* @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
*
- * Get the netlist version information
- */
-static enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw)
-{
- struct ice_netlist_ver_info *ver = &hw->netlist_ver;
- enum ice_status ret;
- u32 id_blk_start;
- __le16 raw_data;
- u16 data, i;
- u16 *buff;
-
- ret = ice_acquire_nvm(hw, ICE_RES_READ);
- if (ret)
- return ret;
- buff = kcalloc(ICE_AQC_NVM_NETLIST_ID_BLK_LEN, sizeof(*buff),
- GFP_KERNEL);
- if (!buff) {
- ret = ICE_ERR_NO_MEMORY;
- goto exit_no_mem;
- }
-
- /* read module length */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
- ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
- false, false, NULL);
- if (ret)
- goto exit_error;
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ */
+static enum ice_status
+ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
+ struct ice_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ enum ice_status status;
+ u16 *id_blk;
- data = le16_to_cpu(raw_data);
- /* exit if length is = 0 */
- if (!data)
- goto exit_error;
+ status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
+ if (status)
+ return status;
- /* read node count */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
- ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
- false, false, NULL);
- if (ret)
- goto exit_error;
- data = le16_to_cpu(raw_data) & ICE_AQC_NVM_NETLIST_NODE_COUNT_M;
+ if (module_id != ICE_NETLIST_LINK_TOPO_MOD_ID) {
+ ice_debug(hw, ICE_DBG_NVM, "Expected netlist module_id ID of 0x%04x, but got 0x%04x\n",
+ ICE_NETLIST_LINK_TOPO_MOD_ID, module_id);
+ return ICE_ERR_NVM;
+ }
+
+ status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_MODULE_LEN, &length);
+ if (status)
+ return status;
+
+ /* sanity check that we have at least enough words to store the netlist ID block */
+ if (length < ICE_NETLIST_ID_BLK_SIZE) {
+ ice_debug(hw, ICE_DBG_NVM, "Netlist Link Topology module too small. Expected at least %u words, but got %u words.\n",
+ ICE_NETLIST_ID_BLK_SIZE, length);
+ return ICE_ERR_NVM;
+ }
- /* netlist ID block starts from offset 4 + node count * 2 */
- id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;
+ status = ice_read_netlist_module(hw, bank, ICE_LINK_TOPO_NODE_COUNT, &node_count);
+ if (status)
+ return status;
+ node_count &= ICE_LINK_TOPO_NODE_COUNT_M;
- /* read the entire netlist ID block */
- ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
- id_blk_start * 2,
- ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
- false, NULL);
- if (ret)
+ id_blk = kcalloc(ICE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
+ if (!id_blk)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Read out the entire Netlist ID Block at once. */
+ status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR,
+ ICE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
+ (u8 *)id_blk, ICE_NETLIST_ID_BLK_SIZE * sizeof(u16));
+ if (status)
goto exit_error;
- for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
- buff[i] = le16_to_cpu(((__force __le16 *)buff)[i]);
-
- ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
- ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
- ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
- ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
- ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
+ for (i = 0; i < ICE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = le16_to_cpu(((__force __le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[ICE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[ICE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[ICE_NETLIST_ID_BLK_CUST_VER];
/* Read the left most 4 bytes of SHA */
- ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
- buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];
+ netlist->hash = id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[ICE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
exit_error:
- kfree(buff);
-exit_no_mem:
- ice_release_nvm(hw);
- return ret;
+ kfree(id_blk);
+
+ return status;
+}
+
+/**
+ * ice_get_inactive_netlist_ver
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ */
+enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+{
+ return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
/**
@@ -555,7 +836,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
ice_debug(hw, ICE_DBG_NVM, "Predicted flash size is %u bytes\n", max_size);
- hw->nvm.flash_size = max_size;
+ hw->flash.flash_size = max_size;
err_read_flat_nvm:
ice_release_nvm(hw);
@@ -564,6 +845,151 @@ err_read_flat_nvm:
}
/**
+ * ice_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word indicates, via its highest bit, whether the value is
+ * stored in word units or 4KB sector units. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ */
+static enum ice_status
+ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+{
+ enum ice_status status;
+ u16 value;
+
+ status = ice_read_sr_word(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & ICE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~ICE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return 0;
+}
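
A worked standalone example of the pointer decode above, assuming (per the comment) that bit 15 selects 4KB sector units; otherwise the value counts 2-byte words. Area size words, handled by the next function, are always in 4KB units.

#include <stdint.h>
#include <stdio.h>

#define SR_PTR_4KB_UNITS 0x8000 /* assumed: bit 15 selects 4KB sector units */

static uint32_t sr_pointer_to_bytes(uint16_t value)
{
        if (value & SR_PTR_4KB_UNITS)
                return (uint32_t)(value & ~SR_PTR_4KB_UNITS) * 4 * 1024;
        return (uint32_t)value * 2;
}

int main(void)
{
        /* 0x8003 -> 3 * 4096 = 12288 bytes; 0x0100 -> 256 * 2 = 512 bytes */
        printf("%u %u\n", sr_pointer_to_bytes(0x8003), sr_pointer_to_bytes(0x0100));
        return 0;
}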
+
+/**
+ * ice_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ */
+static enum ice_status
+ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+{
+ enum ice_status status;
+ u16 value;
+
+ status = ice_read_sr_word(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return 0;
+}
+
+/**
+ * ice_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ice_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ */
+static enum ice_status
+ice_determine_active_flash_banks(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ enum ice_status status;
+ u16 ctrl_word;
+
+ status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read the Shadow RAM control word\n");
+ return status;
+ }
+
+ /* Check that the control word indicates validity */
+ if ((ctrl_word & ICE_SR_CTRL_WORD_1_M) >> ICE_SR_CTRL_WORD_1_S != ICE_SR_CTRL_WORD_VALID) {
+ ice_debug(hw, ICE_DBG_NVM, "Shadow RAM control word is invalid\n");
+ return ICE_ERR_CFG;
+ }
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = ICE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = ICE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & ICE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = ICE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = ICE_2ND_FLASH_BANK;
+
+ status = ice_read_sr_pointer(hw, ICE_SR_1ST_NVM_BANK_PTR, &banks->nvm_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_NVM_BANK_SIZE, &banks->nvm_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM bank area size\n");
+ return status;
+ }
+
+ status = ice_read_sr_pointer(hw, ICE_SR_1ST_OROM_BANK_PTR, &banks->orom_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_OROM_BANK_SIZE, &banks->orom_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read OROM bank area size\n");
+ return status;
+ }
+
+ status = ice_read_sr_pointer(hw, ICE_SR_NETLIST_BANK_PTR, &banks->netlist_ptr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank pointer\n");
+ return status;
+ }
+
+ status = ice_read_sr_area_size(hw, ICE_SR_NETLIST_BANK_SIZE, &banks->netlist_size);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to read Netlist bank area size\n");
+ return status;
+ }
+
+ return 0;
+}
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -572,8 +998,7 @@ err_read_flat_nvm:
*/
enum ice_status ice_init_nvm(struct ice_hw *hw)
{
- struct ice_nvm_info *nvm = &hw->nvm;
- u16 eetrack_lo, eetrack_hi, ver;
+ struct ice_flash_info *flash = &hw->flash;
enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
@@ -585,54 +1010,43 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
/* Switching to words (sr_size contains power of 2) */
- nvm->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
+ flash->sr_words = BIT(sr_size) * ICE_SR_WORDS_IN_1KB;
/* Check if we are in the normal or blank NVM programming mode */
fla = rd32(hw, GLNVM_FLA);
if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
- nvm->blank_nvm_mode = false;
+ flash->blank_nvm_mode = false;
} else {
/* Blank programming mode */
- nvm->blank_nvm_mode = true;
+ flash->blank_nvm_mode = true;
ice_debug(hw, ICE_DBG_NVM, "NVM init error: unsupported blank mode.\n");
return ICE_ERR_NVM_BLANK_MODE;
}
- status = ice_read_sr_word(hw, ICE_SR_NVM_DEV_STARTER_VER, &ver);
+ status = ice_discover_flash_size(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read DEV starter version.\n");
+ ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
return status;
}
- nvm->major_ver = (ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
- nvm->minor_ver = (ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
- status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_LO, &eetrack_lo);
- if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK lo.\n");
- return status;
- }
- status = ice_read_sr_word(hw, ICE_SR_NVM_EETRACK_HI, &eetrack_hi);
+ status = ice_determine_active_flash_banks(hw);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed to read EETRACK hi.\n");
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine active flash banks.\n");
return status;
}
- nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
-
- status = ice_discover_flash_size(hw);
+ status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
if (status) {
- ice_debug(hw, ICE_DBG_NVM, "NVM init error: failed to discover flash size.\n");
+ ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
return status;
}
- status = ice_get_orom_ver_info(hw);
- if (status) {
+ status = ice_get_orom_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->orom);
+ if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read Option ROM info.\n");
- return status;
- }
/* read the netlist version information */
- status = ice_get_netlist_ver_info(hw);
+ status = ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->netlist);
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 8d430909f846..c6f05f43d593 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -4,6 +4,14 @@
#ifndef _ICE_NVM_H_
#define _ICE_NVM_H_
+struct ice_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+} __packed;
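
Because the structure above is __packed, its size is 4 + 1 + 4 + 1 + 2*32 = 74 bytes, which is what ice_get_orom_civd_data() reads at each 512-byte-aligned candidate offset. A standalone compile-time check of that arithmetic (a mirror of the layout, not driver code):

#include <stdint.h>

struct civd_layout_check {
        uint8_t signature[4];
        uint8_t checksum;
        uint32_t combo_ver;
        uint8_t combo_name_len;
        uint16_t combo_name[32];
} __attribute__((packed));

_Static_assert(sizeof(struct civd_layout_check) == 74,
               "CIVD block should be 74 bytes when packed");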
+
enum ice_status
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
@@ -14,6 +22,12 @@ enum ice_status
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
enum ice_status
+ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
+enum ice_status
+ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
+enum ice_status
+ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
+enum ice_status
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
enum ice_status ice_init_nvm(struct ice_hw *hw);
enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index f0912e44d4ad..2403cb38b93c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -431,6 +431,27 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
+ * ice_aq_move_sched_elems - move scheduler elements
+ * @hw: pointer to the HW struct
+ * @grps_req: number of groups to move
+ * @buf: pointer to buffer
+ * @buf_size: buffer size in bytes
+ * @grps_movd: returns total number of groups moved
+ * @cd: pointer to command details structure or NULL
+ *
+ * Move scheduling elements (0x0408)
+ */
+static enum ice_status
+ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
+ struct ice_aqc_move_elem *buf, u16 buf_size,
+ u16 *grps_movd, struct ice_sq_cd *cd)
+{
+ return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
+ grps_req, (void *)buf, buf_size,
+ grps_movd, cd);
+}
+
+/**
* ice_aq_suspend_sched_elems - suspend scheduler elements
* @hw: pointer to the HW struct
* @elems_req: number of elements to suspend
@@ -1022,6 +1043,28 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
}
/**
+ * ice_sched_get_agg_layer - get the current aggregator layer number
+ * @hw: pointer to the HW struct
+ *
+ * This function returns the current aggregator layer number
+ */
+static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
+{
+ /* Num Layers       aggregator layer
+ *     9                 4
+ *     7 or less         sw_entry_point_layer
+ */
+ /* calculate the aggregator layer based on number of layers. */
+ if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
+ u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
+
+ if (layer > hw->sw_entry_point_layer)
+ return layer;
+ }
+ return hw->sw_entry_point_layer;
+}
+
+/**
* ice_rm_dflt_leaf_node - remove the default leaf node in the tree
* @pi: port information structure
*
@@ -1239,6 +1282,46 @@ sched_query_out:
}
/**
+ * ice_sched_get_psm_clk_freq - determine the PSM clock frequency
+ * @hw: pointer to the HW struct
+ *
+ * Determine the PSM clock frequency and store in HW struct
+ */
+void ice_sched_get_psm_clk_freq(struct ice_hw *hw)
+{
+ u32 val, clk_src;
+
+ val = rd32(hw, GLGEN_CLKSTAT_SRC);
+ clk_src = (val & GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_M) >>
+ GLGEN_CLKSTAT_SRC_PSM_CLK_SRC_S;
+
+#define PSM_CLK_SRC_367_MHZ 0x0
+#define PSM_CLK_SRC_416_MHZ 0x1
+#define PSM_CLK_SRC_446_MHZ 0x2
+#define PSM_CLK_SRC_390_MHZ 0x3
+
+ switch (clk_src) {
+ case PSM_CLK_SRC_367_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_367MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_416_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_416MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_446_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
+ break;
+ case PSM_CLK_SRC_390_MHZ:
+ hw->psm_clk_freq = ICE_PSM_CLK_390MHZ_IN_HZ;
+ break;
+ default:
+ ice_debug(hw, ICE_DBG_SCHED, "PSM clk_src unexpected %u\n",
+ clk_src);
+ /* fall back to a safe default */
+ hw->psm_clk_freq = ICE_PSM_CLK_446MHZ_IN_HZ;
+ }
+}
+
+/**
* ice_sched_find_node_in_subtree - Find node in part of base node subtree
* @hw: pointer to the HW struct
* @base: pointer to the base node
@@ -1364,7 +1447,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
/**
* ice_sched_get_vsi_node - Get a VSI node based on VSI ID
- * @hw: pointer to the HW struct
+ * @pi: pointer to the port information structure
* @tc_node: pointer to the TC node
* @vsi_handle: software VSI handle
*
@@ -1372,14 +1455,14 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* TC branch
*/
static struct ice_sched_node *
-ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
+ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 vsi_handle)
{
struct ice_sched_node *node;
u8 vsi_layer;
- vsi_layer = ice_sched_get_vsi_layer(hw);
- node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
+ node = ice_sched_get_first_node(pi, tc_node, vsi_layer);
/* Check whether it already exists */
while (node) {
@@ -1392,6 +1475,38 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
}
/**
+ * ice_sched_get_agg_node - Get an aggregator node based on aggregator ID
+ * @pi: pointer to the port information structure
+ * @tc_node: pointer to the TC node
+ * @agg_id: aggregator ID
+ *
+ * This function retrieves an aggregator node for a given aggregator ID from
+ * a given TC branch
+ */
+static struct ice_sched_node *
+ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ u32 agg_id)
+{
+ struct ice_sched_node *node;
+ struct ice_hw *hw = pi->hw;
+ u8 agg_layer;
+
+ if (!hw)
+ return NULL;
+ agg_layer = ice_sched_get_agg_layer(hw);
+ node = ice_sched_get_first_node(pi, tc_node, agg_layer);
+
+ /* Check whether it already exists */
+ while (node) {
+ if (node->agg_id == agg_id)
+ return node;
+ node = node->sibling;
+ }
+
+ return node;
+}
+
+/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
* @num_qs: number of queues
@@ -1444,7 +1559,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
- parent = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
if (!parent)
return ICE_ERR_CFG;
@@ -1477,7 +1592,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
/**
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
- * @hw: pointer to the HW struct
+ * @pi: pointer to the port info structure
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
@@ -1486,15 +1601,15 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* layers
*/
static void
-ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
+ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
struct ice_sched_node *node;
u8 vsil;
int i;
- vsil = ice_sched_get_vsi_layer(hw);
- for (i = vsil; i >= hw->sw_entry_point_layer; i--)
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+ for (i = vsil; i >= pi->hw->sw_entry_point_layer; i--)
/* Add intermediate nodes if TC has no children and
* need at least one node for VSI
*/
@@ -1504,11 +1619,10 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
/* If intermediate nodes are reached max children
* then add a new one.
*/
- node = ice_sched_get_first_node(hw->port_info, tc_node,
- (u8)i);
+ node = ice_sched_get_first_node(pi, tc_node, (u8)i);
/* scan all the siblings */
while (node) {
- if (node->num_children < hw->max_children[i])
+ if (node->num_children < pi->hw->max_children[i])
break;
node = node->sibling;
}
@@ -1588,14 +1702,13 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *tc_node;
- struct ice_hw *hw = pi->hw;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
/* calculate number of supported nodes needed for this VSI */
- ice_sched_calc_vsi_support_nodes(hw, tc_node, num_nodes);
+ ice_sched_calc_vsi_support_nodes(pi, tc_node, num_nodes);
/* add VSI supported nodes to TC subtree */
return ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
@@ -1628,7 +1741,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!tc_node)
return ICE_ERR_CFG;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
@@ -1691,7 +1804,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
return ICE_ERR_PARAM;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
/* suspend the VSI if TC is not enabled */
if (!enable) {
@@ -1712,7 +1825,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
if (status)
return status;
- vsi_node = ice_sched_get_vsi_node(hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
return ICE_ERR_CFG;
@@ -1821,7 +1934,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!tc_node)
continue;
- vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
if (!vsi_node)
continue;
@@ -1874,6 +1987,720 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
}
/**
+ * ice_get_agg_info - get the aggregator info
+ * @hw: pointer to the hardware structure
+ * @agg_id: aggregator ID
+ *
+ * This function validates the aggregator ID. It returns the aggregator info
+ * if the aggregator ID is present in the list, otherwise it returns NULL.
+ */
+static struct ice_sched_agg_info *
+ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry)
+ if (agg_info->agg_id == agg_id)
+ return agg_info;
+
+ return NULL;
+}
+
+/**
+ * ice_sched_get_free_vsi_parent - Find a free parent node in aggregator subtree
+ * @hw: pointer to the HW struct
+ * @node: pointer to a child node
+ * @num_nodes: num nodes count array
+ *
+ * This function walks through the aggregator subtree to find a free parent
+ * node
+ */
+static struct ice_sched_node *
+ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
+ u16 *num_nodes)
+{
+ u8 l = node->tx_sched_layer;
+ u8 vsil, i;
+
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ /* Is it VSI parent layer ? */
+ if (l == vsil - 1)
+ return (node->num_children < hw->max_children[l]) ? node : NULL;
+
+ /* We have intermediate nodes. Let's walk through the subtree. If the
+ * intermediate node has space to add a new node then clear the count
+ */
+ if (node->num_children < hw->max_children[l])
+ num_nodes[l] = 0;
+ /* The recursive call below is intentional and will not go deeper than
+ * 2 or 3 levels.
+ */
+
+ for (i = 0; i < node->num_children; i++) {
+ struct ice_sched_node *parent;
+
+ parent = ice_sched_get_free_vsi_parent(hw, node->children[i],
+ num_nodes);
+ if (parent)
+ return parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_sched_update_parent - update the new parent in SW DB
+ * @new_parent: pointer to a new parent node
+ * @node: pointer to a child node
+ *
+ * This function removes the child from the old parent and adds it to a new
+ * parent
+ */
+static void
+ice_sched_update_parent(struct ice_sched_node *new_parent,
+ struct ice_sched_node *node)
+{
+ struct ice_sched_node *old_parent;
+ u8 i, j;
+
+ old_parent = node->parent;
+
+ /* update the old parent children */
+ for (i = 0; i < old_parent->num_children; i++)
+ if (old_parent->children[i] == node) {
+ for (j = i + 1; j < old_parent->num_children; j++)
+ old_parent->children[j - 1] =
+ old_parent->children[j];
+ old_parent->num_children--;
+ break;
+ }
+
+ /* now move the node to a new parent */
+ new_parent->children[new_parent->num_children++] = node;
+ node->parent = new_parent;
+ node->info.parent_teid = new_parent->info.node_teid;
+}
+
+/**
+ * ice_sched_move_nodes - move child nodes to a given parent
+ * @pi: port information structure
+ * @parent: pointer to parent node
+ * @num_items: number of child nodes to be moved
+ * @list: pointer to child node teids
+ *
+ * This function moves the child nodes to a given parent.
+ */
+static enum ice_status
+ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
+ u16 num_items, u32 *list)
+{
+ struct ice_aqc_move_elem *buf;
+ struct ice_sched_node *node;
+ enum ice_status status = 0;
+ u16 i, grps_movd = 0;
+ struct ice_hw *hw;
+ u16 buf_len;
+
+ hw = pi->hw;
+
+ if (!parent || !num_items)
+ return ICE_ERR_PARAM;
+
+ /* Does parent have enough space */
+ if (parent->num_children + num_items >
+ hw->max_children[parent->tx_sched_layer])
+ return ICE_ERR_AQ_FULL;
+
+ buf_len = struct_size(buf, teid, 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < num_items; i++) {
+ node = ice_sched_find_node_by_teid(pi->root, list[i]);
+ if (!node) {
+ status = ICE_ERR_PARAM;
+ goto move_err_exit;
+ }
+
+ buf->hdr.src_parent_teid = node->info.parent_teid;
+ buf->hdr.dest_parent_teid = parent->info.node_teid;
+ buf->teid[0] = node->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
+ &grps_movd, NULL);
+ if (status && grps_movd != 1) {
+ status = ICE_ERR_CFG;
+ goto move_err_exit;
+ }
+
+ /* update the SW DB */
+ ice_sched_update_parent(parent, node);
+ }
+
+move_err_exit:
+ kfree(buf);
+ return status;
+}
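
The buffer sizing above uses struct_size(buf, teid, 1): the fixed header plus room for a single flexible-array element, since one element is moved per AQ call. A standalone sketch of that pattern (field layout flattened and illustrative, not the real struct ice_aqc_move_elem):

#include <stdint.h>
#include <stdlib.h>

struct move_elem_sketch {
        uint32_t src_parent_teid;
        uint32_t dest_parent_teid;
        uint16_t num_elems;
        uint32_t teid[];                /* flexible array member */
};

int main(void)
{
        /* header size plus one flexible-array entry, as struct_size() computes */
        size_t buf_len = sizeof(struct move_elem_sketch) + 1 * sizeof(uint32_t);
        struct move_elem_sketch *buf = calloc(1, buf_len);

        if (!buf)
                return 1;
        buf->num_elems = 1;
        buf->teid[0] = 0x1234;          /* one element moved per call */
        free(buf);
        return 0;
}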
+
+/**
+ * ice_sched_move_vsi_to_agg - move VSI to aggregator node
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function moves a VSI to an aggregator node or its subtree.
+ * Intermediate nodes may be created if required.
+ */
+static enum ice_status
+ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
+ u8 tc)
+{
+ struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ u32 first_node_teid, vsi_teid;
+ enum ice_status status;
+ u16 num_nodes_added;
+ u8 aggl, vsil, i;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ vsi_node = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
+ if (!vsi_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Is this VSI already part of given aggregator? */
+ if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
+ return 0;
+
+ aggl = ice_sched_get_agg_layer(pi->hw);
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+
+ /* set intermediate node count to 1 between aggregator and VSI layers */
+ for (i = aggl + 1; i < vsil; i++)
+ num_nodes[i] = 1;
+
+ /* Check if the aggregator subtree has any free node to add the VSI */
+ for (i = 0; i < agg_node->num_children; i++) {
+ parent = ice_sched_get_free_vsi_parent(pi->hw,
+ agg_node->children[i],
+ num_nodes);
+ if (parent)
+ goto move_nodes;
+ }
+
+ /* add new nodes */
+ parent = agg_node;
+ for (i = aggl + 1; i < vsil; i++) {
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_nodes_added);
+ if (status || num_nodes[i] != num_nodes_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_nodes_added)
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ else
+ parent = parent->children[0];
+
+ if (!parent)
+ return ICE_ERR_CFG;
+ }
+
+move_nodes:
+ vsi_teid = le32_to_cpu(vsi_node->info.node_teid);
+ return ice_sched_move_nodes(pi, parent, 1, &vsi_teid);
+}
+
+/**
+ * ice_move_all_vsi_to_dflt_agg - move all VSI(s) to default aggregator
+ * @pi: port information structure
+ * @agg_info: aggregator info
+ * @tc: traffic class number
+ * @rm_vsi_info: remove the VSI info from the aggregator (true) or keep it (false)
+ *
+ * This function moves all the VSI(s) to the default aggregator and deletes
+ * the aggregator VSI info if rm_vsi_info is true. The caller must hold the
+ * scheduler lock.
+ */
+static enum ice_status
+ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
+ struct ice_sched_agg_info *agg_info, u8 tc,
+ bool rm_vsi_info)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *tmp;
+ enum ice_status status = 0;
+
+ list_for_each_entry_safe(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
+ list_entry) {
+ u16 vsi_handle = agg_vsi_info->vsi_handle;
+
+ /* Move VSI to default aggregator */
+ if (!ice_is_tc_ena(agg_vsi_info->tc_bitmap[0], tc))
+ continue;
+
+ status = ice_sched_move_vsi_to_agg(pi, vsi_handle,
+ ICE_DFLT_AGG_ID, tc);
+ if (status)
+ break;
+
+ clear_bit(tc, agg_vsi_info->tc_bitmap);
+ if (rm_vsi_info && !agg_vsi_info->tc_bitmap[0]) {
+ list_del(&agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(pi->hw), agg_vsi_info);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_sched_is_agg_inuse - check whether the aggregator is in use or not
+ * @pi: port information structure
+ * @node: node pointer
+ *
+ * This function checks whether the aggregator has any VSI attached to it.
+ */
+static bool
+ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
+{
+ u8 vsil, i;
+
+ vsil = ice_sched_get_vsi_layer(pi->hw);
+ if (node->tx_sched_layer < vsil - 1) {
+ for (i = 0; i < node->num_children; i++)
+ if (ice_sched_is_agg_inuse(pi, node->children[i]))
+ return true;
+ return false;
+ } else {
+ return node->num_children ? true : false;
+ }
+}
+
+/**
+ * ice_sched_rm_agg_cfg - remove the aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function removes the aggregator node and intermediate nodes if any
+ * from the given TC
+ */
+static enum ice_status
+ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
+{
+ struct ice_sched_node *tc_node, *agg_node;
+ struct ice_hw *hw = pi->hw;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ if (!agg_node)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Can't remove the aggregator node if it has children */
+ if (ice_sched_is_agg_inuse(pi, agg_node))
+ return ICE_ERR_IN_USE;
+
+ /* need to remove the whole subtree if aggregator node is the
+ * only child.
+ */
+ while (agg_node->tx_sched_layer > hw->sw_entry_point_layer) {
+ struct ice_sched_node *parent = agg_node->parent;
+
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ if (parent->num_children > 1)
+ break;
+
+ agg_node = parent;
+ }
+
+ ice_free_sched_node(pi, agg_node);
+ return 0;
+}
+
+/**
+ * ice_rm_agg_cfg_tc - remove aggregator configuration for TC
+ * @pi: port information structure
+ * @agg_info: aggregator ID
+ * @tc: TC number
+ * @rm_vsi_info: remove the VSI info from the aggregator (true) or keep it (false)
+ *
+ * This function removes the aggregator's references to the VSIs of the given
+ * TC and removes the aggregator configuration completely for the requested
+ * TC. The caller must hold the scheduler lock.
+ */
+static enum ice_status
+ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
+ u8 tc, bool rm_vsi_info)
+{
+ enum ice_status status = 0;
+
+ /* If nothing to remove - return success */
+ if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ goto exit_rm_agg_cfg_tc;
+
+ status = ice_move_all_vsi_to_dflt_agg(pi, agg_info, tc, rm_vsi_info);
+ if (status)
+ goto exit_rm_agg_cfg_tc;
+
+ /* Delete aggregator node(s) */
+ status = ice_sched_rm_agg_cfg(pi, agg_info->agg_id, tc);
+ if (status)
+ goto exit_rm_agg_cfg_tc;
+
+ clear_bit(tc, agg_info->tc_bitmap);
+exit_rm_agg_cfg_tc:
+ return status;
+}
+
+/**
+ * ice_save_agg_tc_bitmap - save aggregator TC bitmap
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc_bitmap: 8 bits TC bitmap
+ *
+ * Save aggregator TC bitmap. This function needs to be called with scheduler
+ * lock held.
+ */
+static enum ice_status
+ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
+ unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ bitmap_copy(agg_info->replay_tc_bitmap, tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS);
+ return 0;
+}
+
+/**
+ * ice_sched_add_agg_cfg - create an aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @tc: TC number
+ *
+ * This function creates an aggregator node and intermediate nodes if required
+ * for the given TC
+ */
+static enum ice_status
+ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
+{
+ struct ice_sched_node *parent, *agg_node, *tc_node;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u32 first_node_teid;
+ u16 num_nodes_added;
+ u8 i, aggl;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node)
+ return ICE_ERR_CFG;
+
+ agg_node = ice_sched_get_agg_node(pi, tc_node, agg_id);
+ /* Does Agg node already exist ? */
+ if (agg_node)
+ return status;
+
+ aggl = ice_sched_get_agg_layer(hw);
+
+ /* need one node in Agg layer */
+ num_nodes[aggl] = 1;
+
+ /* Check whether the intermediate nodes have space to add the
+ * new aggregator. If they are full, then SW needs to allocate a new
+ * intermediate node on those layers
+ */
+ for (i = hw->sw_entry_point_layer; i < aggl; i++) {
+ parent = ice_sched_get_first_node(pi, tc_node, i);
+
+ /* scan all the siblings */
+ while (parent) {
+ if (parent->num_children < hw->max_children[i])
+ break;
+ parent = parent->sibling;
+ }
+
+ /* all the nodes are full, reserve one for this layer */
+ if (!parent)
+ num_nodes[i]++;
+ }
+
+ /* add the aggregator node */
+ parent = tc_node;
+ for (i = hw->sw_entry_point_layer; i <= aggl; i++) {
+ if (!parent)
+ return ICE_ERR_CFG;
+
+ status = ice_sched_add_nodes_to_layer(pi, tc_node, parent, i,
+ num_nodes[i],
+ &first_node_teid,
+ &num_nodes_added);
+ if (status || num_nodes[i] != num_nodes_added)
+ return ICE_ERR_CFG;
+
+ /* The newly added node can be a new parent for the next
+ * layer nodes
+ */
+ if (num_nodes_added) {
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_node_teid);
+ /* register aggregator ID with the aggregator node */
+ if (parent && i == aggl)
+ parent->agg_id = agg_id;
+ } else {
+ parent = parent->children[0];
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_sched_cfg_agg - configure aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @agg_type: aggregator type queue, VSI, or aggregator group
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * It registers a unique aggregator node into scheduler services. It
+ * allows a user to register with a unique ID to track its resources.
+ * The aggregator type determines if this is a queue group, VSI group
+ * or aggregator group. It then creates the aggregator node(s) for requested
+ * TC(s) or removes an existing aggregator node including its configuration
+ * if indicated via tc_bitmap. Call ice_rm_agg_cfg to release aggregator
+ * resources and remove aggregator ID.
+ * This function needs to be called with scheduler lock held.
+ */
+static enum ice_status
+ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
+ enum ice_agg_type agg_type, unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u8 tc;
+
+ agg_info = ice_get_agg_info(hw, agg_id);
+ if (!agg_info) {
+ /* Create new entry for new aggregator ID */
+ agg_info = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*agg_info),
+ GFP_KERNEL);
+ if (!agg_info)
+ return ICE_ERR_NO_MEMORY;
+
+ agg_info->agg_id = agg_id;
+ agg_info->agg_type = agg_type;
+ agg_info->tc_bitmap[0] = 0;
+
+ /* Initialize the aggregator VSI list head */
+ INIT_LIST_HEAD(&agg_info->agg_vsi_list);
+
+ /* Add new entry in aggregator list */
+ list_add(&agg_info->list_entry, &hw->agg_list);
+ }
+ /* Create aggregator node(s) for requested TC(s) */
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_tc_ena(*tc_bitmap, tc)) {
+ /* Delete aggregator cfg TC if it exists previously */
+ status = ice_rm_agg_cfg_tc(pi, agg_info, tc, false);
+ if (status)
+ break;
+ continue;
+ }
+
+ /* Check if aggregator node for TC already exists */
+ if (ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
+ continue;
+
+ /* Create new aggregator node for TC */
+ status = ice_sched_add_agg_cfg(pi, agg_id, tc);
+ if (status)
+ break;
+
+ /* Save aggregator node's TC information */
+ set_bit(tc, agg_info->tc_bitmap);
+ }
+
+ return status;
+}
+
+/**
+ * ice_cfg_agg - config aggregator node
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @agg_type: aggregator type: queue, VSI, or aggregator group
+ * @tc_bitmap: 8 bits TC bitmap
+ *
+ * This function configures aggregator node(s).
+ */
+enum ice_status
+ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
+ u8 tc_bitmap)
+{
+ unsigned long bitmap = tc_bitmap;
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_cfg_agg(pi, agg_id, agg_type,
+ (unsigned long *)&bitmap);
+ if (!status)
+ status = ice_save_agg_tc_bitmap(pi, agg_id,
+ (unsigned long *)&bitmap);
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
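A minimal usage sketch of the new public ice_cfg_agg() helper (illustrative only; the pf context, the aggregator ID of 1 and the TC mask are assumptions made up for the example, while ICE_AGG_TYPE_AGG, ice_pf_to_dev() and dev_dbg() are used elsewhere in this patch):

	/* Hypothetical caller: create aggregator group 1 and enable it on
	 * TC 0 and TC 1. ice_cfg_agg() takes the scheduler lock internally
	 * and also records the replay bitmap used after a reset.
	 */
	enum ice_status status;

	status = ice_cfg_agg(pf->hw.port_info, 1 /* agg_id */,
			     ICE_AGG_TYPE_AGG, BIT(0) | BIT(1));
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "failed to configure aggregator node\n");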
+/**
+ * ice_get_agg_vsi_info - get the aggregator VSI info
+ * @agg_info: aggregator info
+ * @vsi_handle: software VSI handle
+ *
+ * The function returns aggregator VSI info based on VSI handle. This function
+ * needs to be called with scheduler lock held.
+ */
+static struct ice_sched_agg_vsi_info *
+ice_get_agg_vsi_info(struct ice_sched_agg_info *agg_info, u16 vsi_handle)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle)
+ return agg_vsi_info;
+
+ return NULL;
+}
+
+/**
+ * ice_get_vsi_agg_info - get the aggregator info of VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: Sw VSI handle
+ *
+ * The function returns the aggregator info of the VSI represented by
+ * vsi_handle, which in this case has an aggregator other than the default
+ * one. This function needs to be called with the scheduler lock held.
+ */
+static struct ice_sched_agg_info *
+ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (agg_vsi_info)
+ return agg_info;
+ }
+ return NULL;
+}
+
+/**
+ * ice_save_agg_vsi_tc_bitmap - save aggregator VSI TC bitmap
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * Save the VSI to aggregator TC bitmap. This function needs to be called with
+ * the scheduler lock held.
+ */
+static enum ice_status
+ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_info *agg_info;
+
+ agg_info = ice_get_agg_info(pi->hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ /* check if entry already exists */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info)
+ return ICE_ERR_PARAM;
+ bitmap_copy(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS);
+ return 0;
+}
+
+/**
+ * ice_sched_assoc_vsi_to_agg - associate/move VSI to new/default aggregator
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * This function moves a VSI to a new or default aggregator node. If the VSI is
+ * already associated with the aggregator node, no operation is performed on
+ * the tree. This function needs to be called with the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
+ u16 vsi_handle, unsigned long *tc_bitmap)
+{
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status = 0;
+ struct ice_hw *hw = pi->hw;
+ u8 tc;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ agg_info = ice_get_agg_info(hw, agg_id);
+ if (!agg_info)
+ return ICE_ERR_PARAM;
+ /* check if entry already exists */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info) {
+ /* Create new entry for VSI under aggregator list */
+ agg_vsi_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*agg_vsi_info), GFP_KERNEL);
+ if (!agg_vsi_info)
+ return ICE_ERR_PARAM;
+
+ /* add VSI ID into the aggregator list */
+ agg_vsi_info->vsi_handle = vsi_handle;
+ list_add(&agg_vsi_info->list_entry, &agg_info->agg_vsi_list);
+ }
+ /* Move VSI node to new aggregator node for requested TC(s) */
+ ice_for_each_traffic_class(tc) {
+ if (!ice_is_tc_ena(*tc_bitmap, tc))
+ continue;
+
+ /* Move VSI to new aggregator */
+ status = ice_sched_move_vsi_to_agg(pi, vsi_handle, agg_id, tc);
+ if (status)
+ break;
+
+ set_bit(tc, agg_vsi_info->tc_bitmap);
+ }
+ return status;
+}
+
+/**
* ice_sched_rm_unused_rl_prof - remove unused RL profile
* @pi: port information structure
*
@@ -1955,7 +2782,6 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
buf = node->info;
data = &buf.data;
@@ -1970,7 +2796,32 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
}
/* Configure element */
- status = ice_sched_update_elem(hw, node, &buf);
+ return ice_sched_update_elem(hw, node, &buf);
+}
+
+/**
+ * ice_move_vsi_to_agg - moves VSI to new or default aggregator
+ * @pi: port information structure
+ * @agg_id: aggregator ID
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap of enabled TC(s)
+ *
+ * Move or associate VSI to a new or default aggregator node.
+ */
+enum ice_status
+ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ u8 tc_bitmap)
+{
+ unsigned long bitmap = tc_bitmap;
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
+ (unsigned long *)&bitmap);
+ if (!status)
+ status = ice_save_agg_vsi_tc_bitmap(pi, agg_id, vsi_handle,
+ (unsigned long *)&bitmap);
+ mutex_unlock(&pi->sched_lock);
return status;
}
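Similarly, a hedged sketch of the second new public helper, mirroring the VF rebuild caller added later in this patch (the pf and vsi variables are assumed to be a valid struct ice_pf / struct ice_vsi pair):

	/* Hypothetical caller: attach the VSI to aggregator group 1 on the
	 * TCs it currently has enabled. The helper locks the scheduler and
	 * also saves the replay bitmap consumed by ice_replay_vsi_agg().
	 */
	status = ice_move_vsi_to_agg(pf->hw.port_info, 1 /* agg_id */,
				     vsi->idx, vsi->tc_cfg.ena_tc);
	if (status)
		dev_dbg(ice_pf_to_dev(pf),
			"unable to move VSI %u into aggregator node\n", vsi->idx);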
@@ -2045,11 +2896,12 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
/**
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
+ * @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
*
* This function calculates the wakeup parameter of RL profile.
*/
-static u16 ice_sched_calc_wakeup(s32 bw)
+static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
{
s64 bytes_per_sec, wakeup_int, wakeup_a, wakeup_b, wakeup_f;
s32 wakeup_f_int;
@@ -2057,7 +2909,7 @@ static u16 ice_sched_calc_wakeup(s32 bw)
/* Get the wakeup integer value */
bytes_per_sec = div64_long(((s64)bw * 1000), BITS_PER_BYTE);
- wakeup_int = div64_long(ICE_RL_PROF_FREQUENCY, bytes_per_sec);
+ wakeup_int = div64_long(hw->psm_clk_freq, bytes_per_sec);
if (wakeup_int > 63) {
wakeup = (u16)((1 << 15) | wakeup_int);
} else {
@@ -2066,8 +2918,7 @@ static u16 ice_sched_calc_wakeup(s32 bw)
*/
wakeup_b = (s64)ICE_RL_PROF_MULTIPLIER * wakeup_int;
wakeup_a = div64_long((s64)ICE_RL_PROF_MULTIPLIER *
- ICE_RL_PROF_FREQUENCY,
- bytes_per_sec);
+ hw->psm_clk_freq, bytes_per_sec);
/* Get Fraction value */
wakeup_f = wakeup_a - wakeup_b;
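For a feel of the arithmetic above, an illustrative calculation (numbers assumed for the example, using the 446 MHz PSM clock constant defined later in this patch):

	/* bw = 100000 Kbps (100 Mbps):
	 *   bytes_per_sec = 100000 * 1000 / 8    = 12500000
	 *   wakeup_int    = 446428571 / 12500000 = 35
	 * Since 35 <= 63, the fractional path above is taken and
	 * wakeup_f = wakeup_a - wakeup_b refines the final value.
	 */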
@@ -2087,13 +2938,15 @@ static u16 ice_sched_calc_wakeup(s32 bw)
/**
* ice_sched_bw_to_rl_profile - convert BW to profile parameters
+ * @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
* @profile: profile parameters to return
*
* This function converts the BW to profile structure format.
*/
static enum ice_status
-ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
+ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
+ struct ice_aqc_rl_profile_elem *profile)
{
enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
@@ -2113,7 +2966,7 @@ ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
for (i = 0; i < 64; i++) {
u64 pow_result = BIT_ULL(i);
- ts_rate = div64_long((s64)ICE_RL_PROF_FREQUENCY,
+ ts_rate = div64_long((s64)hw->psm_clk_freq,
pow_result * ICE_RL_PROF_TS_MULTIPLIER);
if (ts_rate <= 0)
continue;
@@ -2137,7 +2990,7 @@ ice_sched_bw_to_rl_profile(u32 bw, struct ice_aqc_rl_profile_elem *profile)
if (found) {
u16 wm;
- wm = ice_sched_calc_wakeup(bw);
+ wm = ice_sched_calc_wakeup(hw, bw);
profile->rl_multiply = cpu_to_le16(mv);
profile->wake_up_calc = cpu_to_le16(wm);
profile->rl_encode = cpu_to_le16(encode);
@@ -2206,7 +3059,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
if (!rl_prof_elem)
return NULL;
- status = ice_sched_bw_to_rl_profile(bw, &rl_prof_elem->profile);
+ status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
if (status)
goto exit_add_rl_prof;
@@ -2941,6 +3794,156 @@ ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
}
/**
+ * ice_sched_get_ena_tc_bitmap - get enabled TC bitmap
+ * @pi: port info struct
+ * @tc_bitmap: 8 bits TC bitmap to check
+ * @ena_tc_bitmap: 8 bits enabled TC bitmap to return
+ *
+ * This function returns the enabled TC bitmap in ena_tc_bitmap. Some TCs may
+ * be missing after a reset, so only TCs that currently have a TC node are
+ * reported as enabled. This function needs to be called with the scheduler
+ * lock held.
+ */
+static void
+ice_sched_get_ena_tc_bitmap(struct ice_port_info *pi,
+ unsigned long *tc_bitmap,
+ unsigned long *ena_tc_bitmap)
+{
+ u8 tc;
+
+ /* Some TC(s) may be missing after reset, adjust for replay */
+ ice_for_each_traffic_class(tc)
+ if (ice_is_tc_ena(*tc_bitmap, tc) &&
+ (ice_sched_get_tc_node(pi, tc)))
+ set_bit(tc, ena_tc_bitmap);
+}
+
+/**
+ * ice_sched_replay_agg - recreate aggregator node(s)
+ * @hw: pointer to the HW struct
+ *
+ * This function recreates aggregator type nodes which were not replayed earlier.
+ * It also replays aggregator BW information. These aggregator nodes are not
+ * associated with VSI type nodes yet.
+ */
+void ice_sched_replay_agg(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+
+ mutex_lock(&pi->sched_lock);
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry)
+ /* replay aggregator (re-create aggregator node) */
+ if (!bitmap_equal(agg_info->tc_bitmap, agg_info->replay_tc_bitmap,
+ ICE_MAX_TRAFFIC_CLASS)) {
+ DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ enum ice_status status;
+
+ bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ ice_sched_get_ena_tc_bitmap(pi,
+ agg_info->replay_tc_bitmap,
+ replay_bitmap);
+ status = ice_sched_cfg_agg(hw->port_info,
+ agg_info->agg_id,
+ ICE_AGG_TYPE_AGG,
+ replay_bitmap);
+ if (status) {
+ dev_info(ice_hw_to_dev(hw),
+ "Replay agg id[%d] failed\n",
+ agg_info->agg_id);
+ /* Move on to next one */
+ continue;
+ }
+ }
+ mutex_unlock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_agg_vsi_preinit - Agg/VSI replay pre initialization
+ * @hw: pointer to the HW struct
+ *
+ * This function initializes the aggregator(s) TC bitmap to zero. This is a
+ * required pre-init step for replaying aggregators.
+ */
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
+{
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+
+ mutex_lock(&pi->sched_lock);
+ list_for_each_entry(agg_info, &hw->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+
+ agg_info->tc_bitmap[0] = 0;
+ list_for_each_entry(agg_vsi_info, &agg_info->agg_vsi_list,
+ list_entry)
+ agg_vsi_info->tc_bitmap[0] = 0;
+ }
+ mutex_unlock(&pi->sched_lock);
+}
+
+/**
+ * ice_sched_replay_vsi_agg - replay aggregator & VSI to aggregator node(s)
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ *
+ * This function replays the aggregator node, the VSI-to-aggregator type nodes,
+ * and their node bandwidth information. This function needs to be called with
+ * the scheduler lock held.
+ */
+static enum ice_status
+ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+{
+ DECLARE_BITMAP(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_port_info *pi = hw->port_info;
+ struct ice_sched_agg_info *agg_info;
+ enum ice_status status;
+
+ bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
+ if (!agg_info)
+ return 0; /* Not present in list - default Agg case */
+ agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
+ if (!agg_vsi_info)
+ return 0; /* Not present in list - default Agg case */
+ ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
+ replay_bitmap);
+ /* Replay aggregator node associated to vsi_handle */
+ status = ice_sched_cfg_agg(hw->port_info, agg_info->agg_id,
+ ICE_AGG_TYPE_AGG, replay_bitmap);
+ if (status)
+ return status;
+
+ bitmap_zero(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ ice_sched_get_ena_tc_bitmap(pi, agg_vsi_info->replay_tc_bitmap,
+ replay_bitmap);
+ /* Move this VSI (vsi_handle) to above aggregator */
+ return ice_sched_assoc_vsi_to_agg(pi, agg_info->agg_id, vsi_handle,
+ replay_bitmap);
+}
+
+/**
+ * ice_replay_vsi_agg - replay VSI to aggregator node
+ * @hw: pointer to the HW struct
+ * @vsi_handle: software VSI handle
+ *
+ * This function replays the association of a VSI to aggregator type nodes and
+ * the node bandwidth information.
+ */
+enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+{
+ struct ice_port_info *pi = hw->port_info;
+ enum ice_status status;
+
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_replay_vsi_agg(hw, vsi_handle);
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
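A hedged sketch of how these replay helpers are presumably sequenced after a reset; the call sites are not part of this hunk, so the ordering below is inferred from the kernel-doc above and is an assumption:

	/* Inferred replay flow (caller names omitted on purpose):
	 *
	 *   ice_sched_replay_agg_vsi_preinit(hw);        // zero cached TC bitmaps
	 *   for each VSI:
	 *           ice_replay_vsi_agg(hw, vsi_handle);  // re-attach VSI to its aggregator
	 *   ice_sched_replay_agg(hw);                    // re-create aggs not replayed above
	 */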
+
+/**
* ice_sched_replay_q_bw - replay queue type node BW
* @pi: port information structure
* @q_ctx: queue context structure
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 0e55ae0d446f..9beef8f0ec76 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -8,6 +8,7 @@
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
+#define ICE_AGG_LAYER_OFFSET 6
#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
/* Burst size is a 12 bits register that is configured while creating the RL
* profile(s). MSB is a granularity bit and tells the granularity type
@@ -23,12 +24,16 @@
((BIT(11) - 1) * 64) /* In Bytes */
#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
-#define ICE_RL_PROF_FREQUENCY 446000000
#define ICE_RL_PROF_ACCURACY_BYTES 128
#define ICE_RL_PROF_MULTIPLIER 10000
#define ICE_RL_PROF_TS_MULTIPLIER 32
#define ICE_RL_PROF_FRACTION 512
+#define ICE_PSM_CLK_367MHZ_IN_HZ 367647059
+#define ICE_PSM_CLK_416MHZ_IN_HZ 416666667
+#define ICE_PSM_CLK_446MHZ_IN_HZ 446428571
+#define ICE_PSM_CLK_390MHZ_IN_HZ 390625000
+
/* BW rate limit profile parameters list entry along
* with bandwidth maintained per layer in port info
*/
@@ -43,6 +48,8 @@ struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u16 vsi_handle;
+ /* save aggregator VSI TC bitmap */
+ DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
struct ice_sched_agg_info {
@@ -51,6 +58,8 @@ struct ice_sched_agg_info {
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
+ /* save aggregator TC bitmap */
+ DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
/* FW AQ command calls */
@@ -60,6 +69,8 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
+
void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
void ice_sched_clear_agg(struct ice_hw *hw);
@@ -78,6 +89,14 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+
+/* Tx scheduler rate limiter functions */
+enum ice_status
+ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
+ enum ice_agg_type agg_type, u8 tc_bitmap);
+enum ice_status
+ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
+ u8 tc_bitmap);
enum ice_status
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
@@ -85,6 +104,9 @@ enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
+void ice_sched_replay_agg(struct ice_hw *hw);
+enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_status.h b/drivers/net/ethernet/intel/ice/ice_status.h
index 4028c6365172..dbf66057371d 100644
--- a/drivers/net/ethernet/intel/ice/ice_status.h
+++ b/drivers/net/ethernet/intel/ice/ice_status.h
@@ -29,6 +29,7 @@ enum ice_status {
ICE_ERR_HW_TABLE = -19,
ICE_ERR_FW_DDP_MISMATCH = -20,
+ ICE_ERR_NVM = -50,
ICE_ERR_NVM_CHECKSUM = -51,
ICE_ERR_BUF_TOO_SHORT = -52,
ICE_ERR_NVM_BLANK_MODE = -53,
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index c33612132ddf..67c965a3f5d2 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -603,7 +603,7 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
}
} while (req_desc && !status);
- devm_kfree(ice_hw_to_dev(hw), (void *)rbuf);
+ devm_kfree(ice_hw_to_dev(hw), rbuf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index b6fa83c619dd..b7dc25da1202 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -375,6 +375,11 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
+ if (rx_ring->skb) {
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+ }
+
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
@@ -384,10 +389,6 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
- if (rx_buf->skb) {
- dev_kfree_skb(rx_buf->skb);
- rx_buf->skb = NULL;
- }
if (!rx_buf->page)
continue;
@@ -443,6 +444,22 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
}
/**
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer for this ring.
+ */
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+{
+ if (ice_ring_uses_build_skb(rx_ring))
+ return ICE_SKB_PAD;
+ else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+ return XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+/**
* ice_setup_rx_ring - Allocate the Rx descriptors
* @rx_ring: the Rx ring to set up
*
@@ -476,6 +493,7 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->rx_offset = ice_rx_offset(rx_ring);
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -493,22 +511,6 @@ err:
return -ENOMEM;
}
-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
-{
- if (ice_ring_uses_build_skb(rx_ring))
- return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
-
- return 0;
-}
-
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
@@ -517,8 +519,8 @@ ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size
#if (PAGE_SIZE < 8192)
truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
- truesize = ice_rx_offset(rx_ring) ?
- SKB_DATA_ALIGN(ice_rx_offset(rx_ring) + size) +
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
#endif
@@ -537,22 +539,20 @@ static int
ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
- int err, result = ICE_XDP_PASS;
struct ice_ring *xdp_ring;
+ int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- break;
+ return ICE_XDP_PASS;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
- break;
+ return ice_xmit_xdp_buff(xdp, xdp_ring);
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
- break;
+ return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
@@ -560,11 +560,8 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- result = ICE_XDP_CONSUMED;
- break;
+ return ICE_XDP_CONSUMED;
}
-
- return result;
}
/**
@@ -656,7 +653,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
bi->dma = dma;
bi->page = page;
- bi->page_offset = ice_rx_offset(rx_ring);
+ bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
@@ -729,15 +726,6 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
}
/**
- * ice_page_is_reserved - check if reuse is possible
- * @page: page struct to check
- */
-static bool ice_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
-/**
* ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
* @rx_buf: Rx buffer to adjust
* @size: Size of adjustment
@@ -775,8 +763,8 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
- /* avoid re-using remote pages */
- if (unlikely(ice_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
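For context, a sketch of what the new common helper checks (my reading of the core dev_page_is_reusable() definition, which lives in the core headers rather than in this patch):

	/* A page is reusable when it is local to this NUMA node and was not
	 * allocated from the pfmemalloc emergency reserves -- the same two
	 * conditions the removed driver-local helper tested, just inverted.
	 */
	static inline bool dev_page_is_reusable(const struct page *page)
	{
		return likely(page_to_nid(page) == numa_mem_id() &&
			      !page_is_pfmemalloc(page));
	}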
@@ -818,7 +806,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + ice_rx_offset(rx_ring));
+ unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
@@ -864,7 +852,6 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
/**
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
- * @skb: skb to be used
* @size: size of buffer to add to skb
* @rx_buf_pgcnt: rx_buf page refcount
*
@@ -872,8 +859,8 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* for use by the CPU.
*/
static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
- const unsigned int size, int *rx_buf_pgcnt)
+ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
+ int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
@@ -885,7 +872,6 @@ ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
0;
#endif
prefetchw(rx_buf->page);
- *skb = rx_buf->skb;
if (!size)
return rx_buf;
@@ -1047,29 +1033,24 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
/* clear contents of buffer_info */
rx_buf->page = NULL;
- rx_buf->skb = NULL;
}
/**
* ice_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
*
* If the buffer is an EOP buffer, this function exits returning false,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
-ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
return false;
- /* place skb in next buffer to be received */
- rx_ring->rx_buf[rx_ring->next_to_clean].skb = skb;
rx_ring->rx_stats.non_eop_descs++;
return true;
@@ -1089,24 +1070,26 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*/
int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_res, xdp_xmit = 0;
+ struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
struct xdp_buff xdp;
bool failure;
- xdp.rxq = &rx_ring->xdp_rxq;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
- struct sk_buff *skb;
+ unsigned char *hard_start;
unsigned int size;
u16 stat_err_bits;
int rx_buf_pgcnt;
@@ -1141,7 +1124,7 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, &skb, size, &rx_buf_pgcnt);
+ rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
if (!size) {
xdp.data = NULL;
@@ -1151,10 +1134,9 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
goto construct_skb;
}
- xdp.data = page_address(rx_buf->page) + rx_buf->page_offset;
- xdp.data_hard_start = xdp.data - ice_rx_offset(rx_ring);
- xdp.data_meta = xdp.data;
- xdp.data_end = xdp.data + size;
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
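A hedged sketch of what the two new XDP helpers expand to (my reading of the core xdp_init_buff()/xdp_prepare_buff() API; only the fields the removed open-coded setup touched are shown):

	/* Once per NAPI poll: record the per-ring constants. */
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
	/* roughly: xdp.frame_sz = frame_sz; xdp.rxq = &rx_ring->xdp_rxq; */

	/* Per packet: set up the data pointers from the buffer. */
	xdp_prepare_buff(&xdp, hard_start, offset, size, true);
	/* roughly: xdp.data_hard_start = hard_start;
	 *          xdp.data            = hard_start + offset;
	 *          xdp.data_end        = xdp.data + size;
	 *          xdp.data_meta       = xdp.data;  (since meta_valid == true)
	 */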
@@ -1204,7 +1186,7 @@ construct_skb:
cleaned_count++;
/* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc, skb))
+ if (ice_is_non_eop(rx_ring, rx_desc))
continue;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
@@ -1234,6 +1216,7 @@ construct_skb:
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
+ skb = NULL;
/* update budget accounting */
total_rx_pkts++;
@@ -1244,6 +1227,7 @@ construct_skb:
if (xdp_prog)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
@@ -1505,22 +1489,11 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
struct ice_vsi *vsi = q_vector->vsi;
u32 itr_val;
- /* when exiting WB_ON_ITR lets set a low ITR value and trigger
- * interrupts to expire right away in case we have more work ready to go
- * already
+ /* when exiting WB_ON_ITR just reset the countdown and let ITR
+ * resume its normal "interrupts-enabled" path
*/
- if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE) {
- itr_val = ice_buildreg_itr(rx->itr_idx, ICE_WB_ON_ITR_USECS);
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
- /* set target back to last user set value */
- rx->target_itr = rx->itr_setting;
- /* set current to what we just wrote and dynamic if needed */
- rx->current_itr = ICE_WB_ON_ITR_USECS |
- (rx->itr_setting & ICE_ITR_DYNAMIC);
- /* allow normal interrupt flow to start */
+ if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
q_vector->itr_countdown = 0;
- return;
- }
/* This will do nothing if dynamic updates are not enabled */
ice_update_itr(q_vector, tx);
@@ -1560,10 +1533,8 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
q_vector->itr_countdown--;
}
- if (!test_bit(__ICE_DOWN, q_vector->vsi->state))
- wr32(&q_vector->vsi->back->hw,
- GLINT_DYN_CTL(q_vector->reg_idx),
- itr_val);
+ if (!test_bit(__ICE_DOWN, vsi->state))
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}
/**
@@ -1573,30 +1544,29 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
* We need to tell hardware to write-back completed descriptors even when
* interrupts are disabled. Descriptors will be written back on cache line
* boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
- * descriptors may not be written back if they don't fill a cache line until the
- * next interrupt.
+ * descriptors may not be written back if they don't fill a cache line until
+ * the next interrupt.
*
- * This sets the write-back frequency to 2 microseconds as that is the minimum
- * value that's not 0 due to ITR granularity. Also, set the INTENA_MSK bit to
- * make sure hardware knows we aren't meddling with the INTENA_M bit.
+ * This sets the write-back frequency to whatever was set previously for the
+ * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
+ * aren't meddling with the INTENA_M bit.
*/
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
struct ice_vsi *vsi = q_vector->vsi;
- /* already in WB_ON_ITR mode no need to change it */
+ /* already in wb_on_itr mode no need to change it */
if (q_vector->itr_countdown == ICE_IN_WB_ON_ITR_MODE)
return;
- if (q_vector->num_ring_rx)
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
- ICE_RX_ITR));
-
- if (q_vector->num_ring_tx)
- wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
- ICE_GLINT_DYN_CTL_WB_ON_ITR(ICE_WB_ON_ITR_USECS,
- ICE_TX_ITR));
+ /* use previously set ITR values for all of the ITR indices by
+ * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
+ * be static in non-adaptive mode (user configured)
+ */
+ wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
+ ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
+ GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
+ GLINT_DYN_CTL_WB_ON_ITR_M);
q_vector->itr_countdown = ICE_IN_WB_ON_ITR_MODE;
}
@@ -1663,8 +1633,13 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
}
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ /* Set write-back on ITR so descriptors in partially filled
+ * cache lines are still written back even while we're polling.
+ */
+ ice_set_wb_on_itr(q_vector);
return budget;
+ }
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
@@ -2421,7 +2396,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
if (unlikely(skb->priority == TC_PRIO_CONTROL &&
vsi->type == ICE_VSI_PF &&
- vsi->port_info->is_sw_lldp))
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ff1a1cbd078e..5dab77504fa5 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -165,7 +165,6 @@ struct ice_tx_offload_params {
struct ice_rx_buf {
union {
struct {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
@@ -240,7 +239,6 @@ enum ice_rx_dtype {
#define ICE_DFLT_INTRL 0
#define ICE_MAX_INTRL 236
-#define ICE_WB_ON_ITR_USECS 2
#define ICE_IN_WB_ON_ITR_MODE 255
/* Sets WB_ON_ITR and assumes INTENA bit is already cleared, which allows
* setting the MSK_M bit to tell hardware to ignore the INTENA_M bit. Also,
@@ -296,8 +294,10 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
struct xsk_buff_pool *xsk_pool;
+ u16 rx_offset;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
+ struct sk_buff *skb;
/* CLX - the below items are only accessed infrequently and should be
* in their own cache line if possible
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index bc2f4390b51d..02b12736ea80 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -191,12 +191,7 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- if (napi_gro_receive(&rx_ring->q_vector->napi, skb) == GRO_DROP) {
- /* this is tracked separately to help us debug stack drops */
- rx_ring->rx_stats.gro_dropped++;
- netdev_dbg(rx_ring->netdev, "Receive Queue %d: Dropped packet from GRO\n",
- rx_ring->q_index);
- }
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 2226a291a394..a6cb0c35748c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -313,14 +313,62 @@ struct ice_orom_info {
u16 build; /* Build version of OROM */
};
-/* NVM Information */
+/* NVM version information */
struct ice_nvm_info {
+ u32 eetrack;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ice_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ice_flash_bank {
+ ICE_INVALID_FLASH_BANK,
+ ICE_1ST_FLASH_BANK,
+ ICE_2ND_FLASH_BANK,
+};
+
+/* Enumeration of which flash bank is desired to read from, either the active
+ * bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
+ * code which just wants to read the active or inactive flash bank.
+ */
+enum ice_bank_select {
+ ICE_ACTIVE_FLASH_BANK,
+ ICE_INACTIVE_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ice_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ice_flash_bank nvm_bank; /* Active NVM bank */
+ enum ice_flash_bank orom_bank; /* Active OROM bank */
+ enum ice_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ice_flash_info {
struct ice_orom_info orom; /* Option ROM version info */
- u32 eetrack; /* NVM data version */
+ struct ice_nvm_info nvm; /* NVM version information */
+ struct ice_netlist_info netlist;/* Netlist version info */
+ struct ice_bank_info banks; /* Flash Bank information */
u16 sr_words; /* Shadow RAM size in words */
u32 flash_size; /* Size of available flash in bytes */
- u8 major_ver; /* major version of NVM package */
- u8 minor_ver; /* minor version of dev starter */
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
@@ -348,16 +396,6 @@ struct ice_link_default_override_tlv {
#define ICE_NVM_VER_LEN 32
-/* netlist version information */
-struct ice_netlist_ver_info {
- u32 major; /* major high/low */
- u32 minor; /* minor high/low */
- u32 type; /* type high/low */
- u32 rev; /* revision high/low */
- u32 hash; /* SHA-1 hash word */
- u16 cust_ver; /* customer version */
-};
-
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -365,7 +403,11 @@ struct ice_netlist_ver_info {
#define ice_for_each_traffic_class(_i) \
for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+/* ICE_DFLT_AGG_ID means that all new VM(s)/VSI nodes connect to the
+ * driver-defined policy for the default aggregator
+ */
#define ICE_INVAL_TEID 0xFFFFFFFF
+#define ICE_DFLT_AGG_ID 0
struct ice_sched_node {
struct ice_sched_node *parent;
@@ -514,6 +556,14 @@ struct ice_dcbx_cfg {
#define ICE_DCBX_APPS_NON_WILLING 0x1
};
+struct ice_qos_cfg {
+ struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
+ struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
+ struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
+ u8 dcbx_status : 3; /* see ICE_DCBX_STATUS_DIS */
+ u8 is_sw_lldp : 1;
+};
+
struct ice_port_info {
struct ice_sched_node *root; /* Root Node per Port */
struct ice_hw *hw; /* back pointer to HW instance */
@@ -537,13 +587,7 @@ struct ice_port_info {
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
/* List contain profile ID(s) and other params per layer */
struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
- struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
- /* DCBX info */
- struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
- struct ice_dcbx_cfg desired_dcbx_cfg; /* CEE Desired Cfg */
- /* LLDP/DCBX Status */
- u8 dcbx_status:3; /* see ICE_DCBX_STATUS_DIS */
- u8 is_sw_lldp:1;
+ struct ice_qos_cfg qos_cfg;
u8 is_vf:1;
};
@@ -576,6 +620,8 @@ struct ice_hw {
void *back;
struct ice_aqc_layer_props *layer_info;
struct ice_port_info *port_info;
+ /* PSM clock frequency for calculating RL profile params */
+ u32 psm_clk_freq;
u64 debug_mask; /* bitmap for debug mask */
enum ice_mac_type mac_type;
@@ -605,10 +651,9 @@ struct ice_hw {
u8 evb_veb; /* true for VEB, false for VEPA */
u8 reset_ongoing; /* true if HW is in reset, false otherwise */
struct ice_bus_info bus;
- struct ice_nvm_info nvm;
+ struct ice_flash_info flash;
struct ice_hw_dev_caps dev_caps; /* device capabilities */
struct ice_hw_func_caps func_caps; /* function capabilities */
- struct ice_netlist_ver_info netlist_ver; /* netlist version info */
struct ice_switch_info *switch_info; /* switch filter lists */
@@ -765,6 +810,7 @@ struct ice_hw_port_stats {
};
/* Checksum and Shadow RAM pointers */
+#define ICE_SR_NVM_CTRL_WORD 0x00
#define ICE_SR_BOOT_CFG_PTR 0x132
#define ICE_SR_NVM_WOL_CFG 0x19
#define ICE_NVM_OROM_VER_OFF 0x02
@@ -784,10 +830,71 @@ struct ice_hw_port_stats {
#define ICE_OROM_VER_MASK (0xff << ICE_OROM_VER_SHIFT)
#define ICE_SR_PFA_PTR 0x40
#define ICE_SR_1ST_NVM_BANK_PTR 0x42
+#define ICE_SR_NVM_BANK_SIZE 0x43
#define ICE_SR_1ST_OROM_BANK_PTR 0x44
+#define ICE_SR_OROM_BANK_SIZE 0x45
#define ICE_SR_NETLIST_BANK_PTR 0x46
+#define ICE_SR_NETLIST_BANK_SIZE 0x47
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
+/* CSS Header words */
+#define ICE_NVM_CSS_SREV_L 0x14
+#define ICE_NVM_CSS_SREV_H 0x15
+
+/* Length of CSS header section in words */
+#define ICE_CSS_HEADER_LENGTH 330
+
+/* Offset of Shadow RAM copy in the NVM bank area. */
+#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32)
+
+/* Size in bytes of Option ROM trailer */
+#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define ICE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define ICE_NETLIST_TYPE_OFFSET 0x0000
+#define ICE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ice_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define ICE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+
+#define ICE_LINK_TOPO_MODULE_LEN ICE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define ICE_LINK_TOPO_NODE_COUNT ICE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+
+#define ICE_LINK_TOPO_NODE_COUNT_M ICE_M(0x3FF, 0)
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define ICE_NETLIST_ID_BLK_SIZE 0x30
+#define ICE_NETLIST_ID_BLK_OFFSET(n) ICE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define ICE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define ICE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define ICE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define ICE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define ICE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define ICE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define ICE_NETLIST_ID_BLK_REV_LOW 0x08
+#define ICE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define ICE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define ICE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* Auxiliary field, mask, and shift definition for Shadow RAM and NVM Flash */
+#define ICE_SR_CTRL_WORD_1_S 0x06
+#define ICE_SR_CTRL_WORD_1_M (0x03 << ICE_SR_CTRL_WORD_1_S)
+#define ICE_SR_CTRL_WORD_VALID 0x1
+#define ICE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define ICE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define ICE_SR_CTRL_WORD_NVM_BANK BIT(5)
+
+#define ICE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
/* Link override related */
#define ICE_SR_PFA_LINK_OVERRIDE_WORDS 10
#define ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS 4
@@ -803,4 +910,9 @@ struct ice_hw_port_stats {
/* Hash redirection LUT for VSI - maximum array size */
#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
+/* AQ API version for LLDP_FILTER_CONTROL */
+#define ICE_FW_API_LLDP_FLTR_MAJ 1
+#define ICE_FW_API_LLDP_FLTR_MIN 7
+#define ICE_FW_API_LLDP_FLTR_PATCH 1
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index ec7f6c64132e..bf5fd812ea0e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1057,11 +1057,45 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves the VSI into the corresponding scheduler aggregator node
+ * based on the cached per-VSI "aggregator node info"
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
* ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
* @vf: VF to rebuild host configuration on
*/
static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
{
+ struct ice_vsi *vsi = vf->pf->vsi[vf->lan_vsi_idx];
struct device *dev = ice_pf_to_dev(vf->pf);
ice_vf_set_host_trust_cfg(vf);
@@ -1073,6 +1107,8 @@ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
if (ice_vf_rebuild_host_vlan_cfg(vf))
dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
vf->vf_id);
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
}
/**
@@ -1677,6 +1713,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!num_vfs) {
if (!pci_vfs_assigned(pdev)) {
ice_free_vfs(pf);
+ if (pf->lag)
+ ice_enable_lag(pf->lag);
return 0;
}
@@ -1688,6 +1726,8 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (err)
return err;
+ if (pf->lag)
+ ice_disable_lag(pf->lag);
return num_vfs;
}
@@ -2312,12 +2352,12 @@ bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ bool rm_promisc, alluni = false, allmulti = false;
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
struct device *dev;
- bool rm_promisc;
int ret = 0;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2344,8 +2384,13 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- rm_promisc = !(info->flags & FLAG_VF_UNICAST_PROMISC) &&
- !(info->flags & FLAG_VF_MULTICAST_PROMISC);
+ if (info->flags & FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+
+ if (info->flags & FLAG_VF_MULTICAST_PROMISC)
+ allmulti = true;
+
+ rm_promisc = !allmulti && !alluni;
if (vsi->num_vlan || vf->port_vlan_info) {
struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
@@ -2399,12 +2444,12 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
enum ice_status status;
u8 promisc_m;
- if (info->flags & FLAG_VF_UNICAST_PROMISC) {
+ if (alluni) {
if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
- } else if (info->flags & FLAG_VF_MULTICAST_PROMISC) {
+ } else if (allmulti) {
if (vf->port_vlan_info || vsi->num_vlan)
promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
else
@@ -2432,15 +2477,16 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
}
}
- if (info->flags & FLAG_VF_MULTICAST_PROMISC)
- set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- else
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ if (allmulti &&
+ !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
+ else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);
- if (info->flags & FLAG_VF_UNICAST_PROMISC)
- set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- else
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
+ else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+ dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 1782146db644..83f3c9574ed1 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -260,45 +260,6 @@ free_buf:
}
/**
- * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
- * @vsi: VSI to allocate the buffer pool on
- *
- * Returns 0 on success, negative on error
- */
-static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
-{
- if (vsi->xsk_pools)
- return 0;
-
- vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
- GFP_KERNEL);
-
- if (!vsi->xsk_pools) {
- vsi->num_xsk_pools = 0;
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/**
- * ice_xsk_remove_pool - Remove an buffer pool for a certain ring/qid
- * @vsi: VSI from which the VSI will be removed
- * @qid: Ring/qid associated with the buffer pool
- */
-static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
-{
- vsi->xsk_pools[qid] = NULL;
- vsi->num_xsk_pools_used--;
-
- if (vsi->num_xsk_pools_used == 0) {
- kfree(vsi->xsk_pools);
- vsi->xsk_pools = NULL;
- vsi->num_xsk_pools = 0;
- }
-}
-
-/**
* ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
@@ -307,12 +268,12 @@ static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
*/
static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
- if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
- !vsi->xsk_pools[qid])
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+ if (!pool)
return -EINVAL;
- xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
- ice_xsk_remove_pool(vsi, qid);
+ xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
}
@@ -333,22 +294,11 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- if (!vsi->num_xsk_pools)
- vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
- if (qid >= vsi->num_xsk_pools)
+ if (qid >= vsi->netdev->real_num_rx_queues ||
+ qid >= vsi->netdev->real_num_tx_queues)
return -EINVAL;
- err = ice_xsk_alloc_pools(vsi);
- if (err)
- return err;
-
- if (vsi->xsk_pools && vsi->xsk_pools[qid])
- return -EBUSY;
-
- vsi->xsk_pools[qid] = pool;
- vsi->num_xsk_pools_used++;
-
- err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -517,11 +467,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
u32 act;
rcu_read_lock();
+ /* ZC path is enabled only when an XDP program is set,
+ * so it cannot be NULL here
+ */
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (!xdp_prog) {
- rcu_read_unlock();
- return ICE_XDP_PASS;
- }
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
@@ -842,11 +791,8 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
int i;
- if (!vsi->xsk_pools)
- return false;
-
- for (i = 0; i < vsi->num_xsk_pools; i++) {
- if (vsi->xsk_pools[i])
+ ice_for_each_rxq(vsi, i) {
+ if (xsk_get_pool_from_qid(vsi->netdev, i))
return true;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 03f78fdb0dcd..878b31d534ec 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -316,7 +316,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
break;
case E1000_TDBAL(0):
for (n = 0; n < 4; n++)
- regs[n] = rd32(E1000_RDBAL(n));
+ regs[n] = rd32(E1000_TDBAL(n));
break;
case E1000_TDBAH(0):
for (n = 0; n < 4; n++)
@@ -3156,7 +3156,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* the PCIe SR-IOV capability.
*/
if (pdev->is_virtfn) {
- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+ WARN(1, KERN_ERR "%s (%x:%x) should not be a VF!\n",
pci_name(pdev), pdev->vendor, pdev->device);
return -EINVAL;
}
@@ -4482,8 +4482,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
else
mrqc |= E1000_MRQC_ENABLE_VMDQ;
} else {
- if (hw->mac.type != e1000_i211)
- mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
+ mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
}
igb_vmm_control(adapter);
@@ -5959,15 +5958,6 @@ static int igb_tso(struct igb_ring *tx_ring,
return 1;
}
-static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -5990,10 +5980,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- igb_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
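For context, a hedged note on the new core helper used above (not code from this patch): skb_csum_is_sctp() tests the csum_not_inet flag the stack sets for SCTP CRC32c offload, which makes the removed protocol-specific header walking unnecessary. Roughly:

	static inline bool skb_csum_is_sctp(struct sk_buff *skb)
	{
		return skb->csum_not_inet;
	}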
@@ -8227,18 +8214,13 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool igb_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(igb_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -8681,13 +8663,13 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
u16 cleaned_count = igb_desc_unused(rx_ring);
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
-
- xdp.rxq = &rx_ring->xdp_rxq;
+ u32 frame_sz = 0;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+ frame_sz = igb_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
@@ -8715,12 +8697,12 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- igb_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = igb_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 30fdea24e94a..fb3fbcb13331 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2072,15 +2072,6 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
return 1;
}
-static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, __be16 protocol)
{
@@ -2102,10 +2093,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((protocol == htons(ETH_P_IPV6)) &&
- igbvf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 35baae900c1f..5d2809dfd06a 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -217,6 +217,8 @@ struct igc_adapter {
struct timecounter tc;
struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
ktime_t ptp_reset_start; /* Reset time in clock mono */
+
+ char fw_version[32];
};
void igc_up(struct igc_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 32f5fd684139..b909f00a79e6 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -129,7 +129,6 @@
/* 1000BASE-T Status Register */
#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
-#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
/* PHY GPY 211 registers */
#define STANDARD_AN_REG_MASK 0x0007 /* MMD */
@@ -160,6 +159,7 @@
#define IGC_NVM_RW_REG_START 1 /* Start operation */
#define IGC_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
#define IGC_NVM_POLL_READ 0 /* Flag for polling for read complete */
+#define IGC_NVM_DEV_STARTER 5 /* Dev_starter Version */
/* NVM Word Offsets */
#define NVM_CHECKSUM_REG 0x003F
@@ -179,7 +179,6 @@
#define IGC_STATUS_LU 0x00000002 /* Link up.0=no,1=link */
#define IGC_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */
#define IGC_STATUS_FUNC_SHIFT 2
-#define IGC_STATUS_FUNC_1 0x00000004 /* Function 1 */
#define IGC_STATUS_TXOFF 0x00000010 /* transmission paused */
#define IGC_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
#define IGC_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
@@ -284,7 +283,6 @@
#define IGC_TCTL_CT 0x00000ff0 /* collision threshold */
#define IGC_TCTL_COLD 0x003ff000 /* collision distance */
#define IGC_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
-#define IGC_TCTL_MULR 0x10000000 /* Multiple request support */
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 4b9ec7d0b727..495bed47ed0a 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -75,7 +75,7 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
break;
case IGC_TDBAL(0):
for (n = 0; n < 4; n++)
- regs[n] = rd32(IGC_RDBAL(n));
+ regs[n] = rd32(IGC_TDBAL(n));
break;
case IGC_TDBAH(0):
for (n = 0; n < 4; n++)
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index ec8cd69d4992..824a6c454bca 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -129,11 +129,28 @@ static void igc_ethtool_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u16 nvm_version = 0;
+ u16 gphy_version;
+
+ strscpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
+
+ /* NVM image version is reported as firmware version for i225 device */
+ hw->nvm.ops.read(hw, IGC_NVM_DEV_STARTER, 1, &nvm_version);
+
+ /* gPHY firmware version is reported as PHY FW version */
+ gphy_version = igc_read_phy_fw_version(hw);
+
+ scnprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%x:%x",
+ nvm_version,
+ gphy_version);
- strlcpy(drvinfo->driver, igc_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version));
- /* add fw_version here */
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGC_PRIV_FLAGS_STR_LEN;
@@ -544,7 +561,6 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
if (ret_val == 0)
hw->nvm.ops.update(hw);
- /* check if need: igc_set_fw_version(adapter); */
kfree(eeprom_buff);
return ret_val;
}
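Two details of the igc_ethtool_get_drvinfo() change above: the reported string joins the NVM dev-starter word and the gPHY firmware word as "%x:%x" (what `ethtool -i` prints as firmware-version), and the string copies move from strlcpy() to strscpy(). A minimal sketch of why strscpy() is preferred; the example_* helper name is hypothetical, not driver code:

    /* strscpy() stops at the destination size and returns -E2BIG on
     * truncation; strlcpy() returns strlen(src) and keeps reading the
     * source even when it does not fit.
     */
    static void example_copy_fw_version(struct ethtool_drvinfo *drvinfo,
                                        const char *fw_version)
    {
            ssize_t len;

            len = strscpy(drvinfo->fw_version, fw_version,
                          sizeof(drvinfo->fw_version));
            if (len == -E2BIG)
                    pr_debug("fw_version truncated to \"%s\"\n",
                             drvinfo->fw_version);
    }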
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 9da5f83ce456..4461f8b9a864 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -113,7 +113,6 @@ struct igc_nvm_operations {
s32 (*write)(struct igc_hw *hw, u16 offset, u16 i, u16 *data);
s32 (*update)(struct igc_hw *hw);
s32 (*validate)(struct igc_hw *hw);
- s32 (*valid_led_default)(struct igc_hw *hw, u16 *data);
};
struct igc_phy_operations {
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index afd6a62da29d..7ac9597ddb84 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -949,15 +949,6 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
}
}
-static inline bool igc_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
@@ -980,10 +971,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if ((first->protocol == htons(ETH_P_IP) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- (first->protocol == htons(ETH_P_IPV6) &&
- igc_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IGC_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -1660,18 +1648,13 @@ static void igc_reuse_rx_page(struct igc_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool igc_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool igc_can_reuse_rx_page(struct igc_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(igc_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -3686,6 +3669,7 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.prc1522 += rd32(IGC_PRC1522);
adapter->stats.tlpic += rd32(IGC_TLPIC);
adapter->stats.rlpic += rd32(IGC_RLPIC);
+ adapter->stats.hgptc += rd32(IGC_HGPTC);
mpc = rd32(IGC_MPC);
adapter->stats.mpc += mpc;
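The page-reuse hunks here and in ixgbe/ixgbevf drop the per-driver *_page_is_reserved() helpers in favour of the common dev_page_is_reusable(), which combines the NUMA-locality and pfmemalloc checks with the polarity inverted. A minimal sketch, simplified from include/linux/skbuff.h:

    /* A receive page may be recycled when it sits on the local NUMA node
     * and was not handed out from the pfmemalloc emergency reserves.
     */
    static inline bool dev_page_is_reusable(const struct page *page)
    {
            return likely(page_to_nid(page) == numa_mem_id() &&
                          !page_is_pfmemalloc(page));
    }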
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c
index 8e1799508edc..83aeb5e7076f 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.c
+++ b/drivers/net/ethernet/intel/igc/igc_phy.c
@@ -791,3 +791,21 @@ s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data)
return ret_val;
}
+
+/**
+ * igc_read_phy_fw_version - Read gPHY firmware version
+ * @hw: pointer to the HW structure
+ */
+u16 igc_read_phy_fw_version(struct igc_hw *hw)
+{
+ struct igc_phy_info *phy = &hw->phy;
+ u16 gphy_version = 0;
+ u16 ret_val;
+
+ /* Read the gPHY firmware version for the i225 device */
+ ret_val = phy->ops.read_reg(hw, IGC_GPHY_VERSION, &gphy_version);
+ if (ret_val)
+ hw_dbg("igc_phy: failed to read gPHY firmware version\n");
+
+ return gphy_version;
+}
diff --git a/drivers/net/ethernet/intel/igc/igc_phy.h b/drivers/net/ethernet/intel/igc/igc_phy.h
index 25cba33de7e2..1b031372d206 100644
--- a/drivers/net/ethernet/intel/igc/igc_phy.h
+++ b/drivers/net/ethernet/intel/igc/igc_phy.h
@@ -17,5 +17,6 @@ void igc_power_up_phy_copper(struct igc_hw *hw);
void igc_power_down_phy_copper(struct igc_hw *hw);
s32 igc_write_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 data);
s32 igc_read_phy_reg_gpy(struct igc_hw *hw, u32 offset, u16 *data);
+u16 igc_read_phy_fw_version(struct igc_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index b52dd9d737e8..3e5cb7aef9da 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -13,6 +13,7 @@
#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
+#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
/* Internal Packet Buffer Size Registers */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index de0fc6ecf491..a604552fa634 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -349,6 +349,7 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
+ u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
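The new rx_offset field caches the ixgbe_rx_offset() result when the ring is allocated (see the ixgbe_setup_rx_resources() hunk below), so the per-packet receive path and the truesize calculations stop re-testing ring_uses_build_skb(). The general pattern, as an illustrative sketch with hypothetical example_* names:

    /* Derive a per-ring constant once at setup instead of per frame. */
    struct example_rx_ring {
            bool    uses_build_skb;
            u16     rx_offset;      /* headroom reserved before packet data */
    };

    static void example_setup_rx_ring(struct example_rx_ring *ring, u16 skb_pad)
    {
            ring->rx_offset = ring->uses_build_skb ? skb_pad : 0;
    }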
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 393d1c2cd853..fae84202d870 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1520,7 +1520,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
}
}
-static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
+static unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring)
{
return ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0;
}
@@ -1561,7 +1561,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = ixgbe_rx_offset(rx_ring);
+ bi->page_offset = rx_ring->rx_offset;
page_ref_add(page, USHRT_MAX - 1);
bi->pagecnt_bias = USHRT_MAX;
rx_ring->rx_stats.alloc_rx_page++;
@@ -1940,19 +1940,14 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool ixgbe_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
int rx_buffer_pgcnt)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(ixgbe_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -2006,8 +2001,8 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
#if (PAGE_SIZE < 8192)
unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) :
+ unsigned int truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) :
SKB_DATA_ALIGN(size);
#endif
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
@@ -2254,8 +2249,8 @@ static unsigned int ixgbe_rx_frame_truesize(struct ixgbe_ring *rx_ring,
#if (PAGE_SIZE < 8192)
truesize = ixgbe_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
- truesize = ring_uses_build_skb(rx_ring) ?
- SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) +
+ truesize = rx_ring->rx_offset ?
+ SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
SKB_DATA_ALIGN(size);
#endif
@@ -2291,22 +2286,22 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
const int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbe_adapter *adapter = q_vector->adapter;
#ifdef IXGBE_FCOE
int ddp_bytes;
unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
+ unsigned int offset = rx_ring->rx_offset;
unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbe_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
@@ -2336,12 +2331,11 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbe_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
@@ -6584,6 +6578,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
/* XDP RX-queue info */
if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
@@ -8040,15 +8035,6 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
-static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
struct ixgbe_ipsec_tx_data *itd)
@@ -8074,10 +8060,7 @@ csum_failed:
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbe_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
@@ -10278,8 +10261,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
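Dropping .ndo_udp_tunnel_add/.ndo_udp_tunnel_del is part of the udp_tunnel_nic conversion: the UDP tunnel core now drives offloaded-port updates through the driver's udp_tunnel_nic_info table, so wiring the generic udp_tunnel_nic_add_port()/udp_tunnel_nic_del_port() helpers into the ndo slots no longer does anything. A hypothetical table of that shape (example_* names are illustrative, not ixgbe's own):

    static int example_set_port(struct net_device *dev, unsigned int table,
                                unsigned int entry, struct udp_tunnel_info *ti)
    {
            /* program ti->port as an offloaded VXLAN destination port */
            return 0;
    }

    static int example_unset_port(struct net_device *dev, unsigned int table,
                                  unsigned int entry, struct udp_tunnel_info *ti)
    {
            /* stop offloading ti->port */
            return 0;
    }

    static const struct udp_tunnel_nic_info example_udp_tunnels = {
            .set_port       = example_set_port,
            .unset_port     = example_unset_port,
            .tables         = {
                    { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
            },
    };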
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4061cd7db5dd..449d7d5b280d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -781,18 +781,13 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}
-static inline bool ixgbevf_page_is_reserved(struct page *page)
-{
- return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
-}
-
static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
struct page *page = rx_buffer->page;
- /* avoid re-using remote pages */
- if (unlikely(ixgbevf_page_is_reserved(page)))
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
return false;
#if (PAGE_SIZE < 8192)
@@ -1121,19 +1116,18 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0;
struct ixgbevf_adapter *adapter = q_vector->adapter;
u16 cleaned_count = ixgbevf_desc_unused(rx_ring);
struct sk_buff *skb = rx_ring->skb;
bool xdp_xmit = false;
struct xdp_buff xdp;
- xdp.rxq = &rx_ring->xdp_rxq;
-
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
+ frame_sz = ixgbevf_rx_frame_truesize(rx_ring, 0);
#endif
+ xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
while (likely(total_rx_packets < budget)) {
struct ixgbevf_rx_buffer *rx_buffer;
@@ -1161,12 +1155,12 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
/* retrieve a buffer from the ring */
if (!skb) {
- xdp.data = page_address(rx_buffer->page) +
- rx_buffer->page_offset;
- xdp.data_meta = xdp.data;
- xdp.data_hard_start = xdp.data -
- ixgbevf_rx_offset(rx_ring);
- xdp.data_end = xdp.data + size;
+ unsigned int offset = ixgbevf_rx_offset(rx_ring);
+ unsigned char *hard_start;
+
+ hard_start = page_address(rx_buffer->page) +
+ rx_buffer->page_offset - offset;
+ xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbevf_rx_frame_truesize(rx_ring, size);
@@ -3844,15 +3838,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
-static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
-{
- unsigned int offset = 0;
-
- ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
-
- return offset == skb_checksum_start_offset(skb);
-}
-
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
struct ixgbevf_ipsec_tx_data *itd)
@@ -3873,10 +3858,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
break;
case offsetof(struct sctphdr, checksum):
/* validate that this is actually an SCTP request */
- if (((first->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
- ((first->protocol == htons(ETH_P_IPV6)) &&
- ixgbevf_ipv6_csum_is_sctp(skb))) {
+ if (skb_csum_is_sctp(skb)) {
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 41815b609569..7fe15a3286f4 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -94,7 +94,6 @@ config MVPP2
config MVPP2_PTP
bool "Marvell Armada 8K Enable PTP support"
- depends on NETWORK_PHY_TIMESTAMPING
depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
(PTP_1588_CLOCK && MVPP2 = m)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc4d8d144401..a635cf84608a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -102,6 +102,8 @@
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
+#define MVNETA_VLAN_PRIO_TO_RXQ 0x2440
+#define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
@@ -490,6 +492,7 @@ struct mvneta_port {
u8 mcast_count[256];
u16 tx_ring_size;
u16 rx_ring_size;
+ u8 prio_tc_map[8];
phy_interface_t phy_interface;
struct device_node *dn;
@@ -2263,11 +2266,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
/* Prefetch header */
prefetch(data);
-
- xdp->data_hard_start = data;
- xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
- xdp->data_end = xdp->data + data_len;
- xdp_set_data_meta_invalid(xdp);
+ xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
+ data_len, false);
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
@@ -2363,9 +2363,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
u32 desc_status, frame_sz;
struct xdp_buff xdp_buf;
+ xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
xdp_buf.data_hard_start = NULL;
- xdp_buf.frame_sz = PAGE_SIZE;
- xdp_buf.rxq = &rxq->xdp_rxq;
sinfo.nr_frags = 0;
@@ -3432,7 +3431,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
return -ENOMEM;
/* Setup XPS mapping */
- if (txq_number > 1)
+ if (pp->neta_armada3700)
+ cpu = 0;
+ else if (txq_number > 1)
cpu = txq->id % num_present_cpus();
else
cpu = pp->rxq_def % num_present_cpus();
@@ -4210,6 +4211,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
node_online);
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+ /* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
+ * are routed to CPU 0, so we don't need all the cpu-hotplug support
+ */
+ if (pp->neta_armada3700)
+ return 0;
spin_lock(&pp->lock);
/*
@@ -4919,6 +4925,63 @@ static int mvneta_ethtool_set_eee(struct net_device *dev,
return phylink_ethtool_set_eee(pp->phylink, eee);
}
+static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
+{
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
+}
+
+static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+{
+ u32 val = 0;
+ int i;
+
+ for (i = 0; i < rxq_number; i++)
+ val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+
+ mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
+}
+
+static int mvneta_setup_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt *qopt)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ u8 num_tc;
+ int i;
+
+ qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = qopt->num_tc;
+
+ if (num_tc > rxq_number)
+ return -EINVAL;
+
+ if (!num_tc) {
+ mvneta_clear_rx_prio_map(pp);
+ netdev_reset_tc(dev);
+ return 0;
+ }
+
+ memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
+
+ mvneta_setup_rx_prio_map(pp);
+
+ netdev_set_num_tc(dev, qopt->num_tc);
+ for (i = 0; i < qopt->num_tc; i++)
+ netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
+
+ return 0;
+}
+
+static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return mvneta_setup_mqprio(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops mvneta_netdev_ops = {
.ndo_open = mvneta_open,
.ndo_stop = mvneta_stop,
@@ -4931,6 +4994,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_do_ioctl = mvneta_ioctl,
.ndo_bpf = mvneta_xdp,
.ndo_xdp_xmit = mvneta_xdp_xmit,
+ .ndo_setup_tc = mvneta_setup_tc,
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
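The mqprio handler added above programs MVNETA_VLAN_PRIO_TO_RXQ, which holds one 3-bit receive-queue number per VLAN priority (priority p lands in bits [3p+2:3p], per the MVNETA_VLAN_PRIO_RXQ_MAP() macro). An illustrative helper showing the packing, not driver code:

    static u32 example_prio_to_rxq_map(const u8 prio_map[8])
    {
            u32 val = 0;
            int prio;

            /* priority p -> queue prio_map[p], 3 bits per priority */
            for (prio = 0; prio < 8; prio++)
                    val |= (u32)(prio_map[prio] & 0x7) << (prio * 3);

            return val;
    }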
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 6bd7e405e830..8edba5ea90f0 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -60,6 +60,9 @@
/* Top Registers */
#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
#define MVPP2_DSA_EXTENDED BIT(5)
+#define MVPP2_VER_ID_REG 0x50b0
+#define MVPP2_VER_PP22 0x10
+#define MVPP2_VER_PP23 0x11
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
@@ -292,6 +295,8 @@
#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
#define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+#define MVPP2_ISR_RX_ERR_CAUSE_REG(port) (0x5520 + 4 * (port))
+#define MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK 0x00ff
/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
@@ -319,6 +324,10 @@
#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000
#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \
MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_BPPI_HIGH_THRESH 0x1E
+#define MVPP2_BM_BPPI_LOW_THRESH 0x1C
+#define MVPP23_BM_BPPI_HIGH_THRESH 0x34
+#define MVPP23_BM_BPPI_LOW_THRESH 0x28
#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4))
#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0)
#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1)
@@ -347,6 +356,10 @@
#define MVPP2_OVERRUN_ETH_DROP 0x7000
#define MVPP2_CLS_ETH_DROP 0x7020
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_REG 0x6310
+#define MVPP22_BM_POOL_BASE_ADDR_HIGH_MASK 0xff
+#define MVPP23_BM_8POOL_MODE BIT(8)
+
/* Hit counters registers */
#define MVPP2_CTRS_IDX 0x7040
#define MVPP22_CTRS_TX_CTR(port, txq) ((txq) | ((port) << 3) | BIT(7))
@@ -469,7 +482,7 @@
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
-/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
+/* Per-port XGMAC registers. PPv2.2 and PPv2.3, only for GOP port 0,
* relative to port->base.
*/
#define MVPP22_XLG_CTRL0_REG 0x100
@@ -506,7 +519,7 @@
#define MVPP22_XLG_CTRL4_MACMODSELECT_GMAC BIT(12)
#define MVPP22_XLG_CTRL4_EN_IDLE_CHECK BIT(14)
-/* SMI registers. PPv2.2 only, relative to priv->iface_base. */
+/* SMI registers. PPv2.2 and PPv2.3, relative to priv->iface_base. */
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
@@ -582,7 +595,7 @@
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
(((index) < (q)->last_desc) ? ((index) + 1) : 0)
-/* XPCS registers. PPv2.2 only */
+/* XPCS registers. PPv2.2 and PPv2.3 */
+/* XPCS registers.PPv2.2 and PPv2.3 */
#define MVPP22_MPCS_BASE(port) (0x7000 + (port) * 0x1000)
#define MVPP22_MPCS_CTRL 0x14
#define MVPP22_MPCS_CTRL_FWD_ERR_CONN BIT(10)
@@ -593,7 +606,16 @@
#define MVPP22_MPCS_CLK_RESET_DIV_RATIO(n) ((n) << 4)
#define MVPP22_MPCS_CLK_RESET_DIV_SET BIT(11)
-/* XPCS registers. PPv2.2 only */
+/* FCA registers. PPv2.2 and PPv2.3 */
+#define MVPP22_FCA_BASE(port) (0x7600 + (port) * 0x1000)
+#define MVPP22_FCA_REG_SIZE 16
+#define MVPP22_FCA_REG_MASK 0xFFFF
+#define MVPP22_FCA_CONTROL_REG 0x0
+#define MVPP22_FCA_ENABLE_PERIODIC BIT(11)
+#define MVPP22_PERIODIC_COUNTER_LSB_REG (0x110)
+#define MVPP22_PERIODIC_COUNTER_MSB_REG (0x114)
+
+/* XPCS registers. PPv2.2 and PPv2.3 */
#define MVPP22_XPCS_BASE(port) (0x7400 + (port) * 0x1000)
#define MVPP22_XPCS_CFG0 0x0
#define MVPP22_XPCS_CFG0_RESET_DIS BIT(0)
@@ -651,9 +673,9 @@
#define GENCONF_PORT_CTRL1_EN(p) BIT(p)
#define GENCONF_PORT_CTRL1_RESET(p) (BIT(p) << 28)
#define GENCONF_CTRL0 0x1120
-#define GENCONF_CTRL0_PORT0_RGMII BIT(0)
-#define GENCONF_CTRL0_PORT1_RGMII_MII BIT(1)
-#define GENCONF_CTRL0_PORT1_RGMII BIT(2)
+#define GENCONF_CTRL0_PORT2_RGMII BIT(0)
+#define GENCONF_CTRL0_PORT3_RGMII_MII BIT(1)
+#define GENCONF_CTRL0_PORT3_RGMII BIT(2)
/* Various constants */
@@ -712,8 +734,8 @@
#define MVPP2_PORT_MAX_RXQ 32
/* Max number of Rx descriptors */
-#define MVPP2_MAX_RXD_MAX 1024
-#define MVPP2_MAX_RXD_DFLT 128
+#define MVPP2_MAX_RXD_MAX 2048
+#define MVPP2_MAX_RXD_DFLT 1024
/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD_MAX 2048
@@ -741,13 +763,73 @@
#define MVPP2_RX_FIFO_PORT_MIN_PKT 0x80
/* TX FIFO constants */
-#define MVPP22_TX_FIFO_DATA_SIZE_16KB 16
+#define MVPP22_TX_FIFO_DATA_SIZE_18KB 18
#define MVPP22_TX_FIFO_DATA_SIZE_10KB 10
-#define MVPP22_TX_FIFO_DATA_SIZE_3KB 3
+#define MVPP22_TX_FIFO_DATA_SIZE_1KB 1
#define MVPP2_TX_FIFO_THRESHOLD_MIN 256 /* Bytes */
#define MVPP2_TX_FIFO_THRESHOLD(kb) \
((kb) * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+/* RX FIFO threshold in 1KB granularity */
+#define MVPP23_PORT0_FIFO_TRSH (9 * 1024)
+#define MVPP23_PORT1_FIFO_TRSH (4 * 1024)
+#define MVPP23_PORT2_FIFO_TRSH (2 * 1024)
+
+/* RX Flow Control Registers */
+#define MVPP2_RX_FC_REG(port) (0x150 + 4 * (port))
+#define MVPP2_RX_FC_EN BIT(24)
+#define MVPP2_RX_FC_TRSH_OFFS 16
+#define MVPP2_RX_FC_TRSH_MASK (0xFF << MVPP2_RX_FC_TRSH_OFFS)
+#define MVPP2_RX_FC_TRSH_UNIT 256
+
+/* MSS Flow control */
+#define MSS_FC_COM_REG 0
+#define FLOW_CONTROL_ENABLE_BIT BIT(0)
+#define FLOW_CONTROL_UPDATE_COMMAND_BIT BIT(31)
+#define FC_QUANTA 0xFFFF
+#define FC_CLK_DIVIDER 100
+
+#define MSS_RXQ_TRESH_BASE 0x200
+#define MSS_RXQ_TRESH_OFFS 4
+#define MSS_RXQ_TRESH_REG(q, fq) (MSS_RXQ_TRESH_BASE + (((q) + (fq)) \
+ * MSS_RXQ_TRESH_OFFS))
+
+#define MSS_BUF_POOL_BASE 0x40
+#define MSS_BUF_POOL_OFFS 4
+#define MSS_BUF_POOL_REG(id) (MSS_BUF_POOL_BASE \
+ + (id) * MSS_BUF_POOL_OFFS)
+
+#define MSS_BUF_POOL_STOP_MASK 0xFFF
+#define MSS_BUF_POOL_START_MASK (0xFFF << MSS_BUF_POOL_START_OFFS)
+#define MSS_BUF_POOL_START_OFFS 12
+#define MSS_BUF_POOL_PORTS_MASK (0xF << MSS_BUF_POOL_PORTS_OFFS)
+#define MSS_BUF_POOL_PORTS_OFFS 24
+#define MSS_BUF_POOL_PORT_OFFS(id) (0x1 << \
+ ((id) + MSS_BUF_POOL_PORTS_OFFS))
+
+#define MSS_RXQ_TRESH_START_MASK 0xFFFF
+#define MSS_RXQ_TRESH_STOP_MASK (0xFFFF << MSS_RXQ_TRESH_STOP_OFFS)
+#define MSS_RXQ_TRESH_STOP_OFFS 16
+
+#define MSS_RXQ_ASS_BASE 0x80
+#define MSS_RXQ_ASS_OFFS 4
+#define MSS_RXQ_ASS_PER_REG 4
+#define MSS_RXQ_ASS_PER_OFFS 8
+#define MSS_RXQ_ASS_PORTID_OFFS 0
+#define MSS_RXQ_ASS_PORTID_MASK 0x3
+#define MSS_RXQ_ASS_HOSTID_OFFS 2
+#define MSS_RXQ_ASS_HOSTID_MASK 0x3F
+
+#define MSS_RXQ_ASS_Q_BASE(q, fq) ((((q) + (fq)) % MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_PER_OFFS)
+#define MSS_RXQ_ASS_PQ_BASE(q, fq) ((((q) + (fq)) / MSS_RXQ_ASS_PER_REG) \
+ * MSS_RXQ_ASS_OFFS)
+#define MSS_RXQ_ASS_REG(q, fq) (MSS_RXQ_ASS_BASE + MSS_RXQ_ASS_PQ_BASE(q, fq))
+
+#define MSS_THRESHOLD_STOP 768
+#define MSS_THRESHOLD_START 1024
+#define MSS_FC_MAX_TIMEOUT 5000
+
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
@@ -845,8 +927,8 @@ enum mvpp22_ptp_packet_format {
#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
/* BM constants */
-#define MVPP2_BM_JUMBO_BUF_NUM 512
-#define MVPP2_BM_LONG_BUF_NUM 1024
+#define MVPP2_BM_JUMBO_BUF_NUM 2048
+#define MVPP2_BM_LONG_BUF_NUM 2048
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
@@ -925,16 +1007,18 @@ struct mvpp2 {
/* Shared registers' base addresses */
void __iomem *lms_base;
void __iomem *iface_base;
+ void __iomem *cm3_base;
- /* On PPv2.2, each "software thread" can access the base
+ /* On PPv2.2 and PPv2.3, each "software thread" can access the base
* register through a separate address space, each 64 KB apart
* from each other. Typically, such address spaces will be
* used per CPU.
*/
void __iomem *swth_base[MVPP2_MAX_THREADS];
- /* On PPv2.2, some port control registers are located into the system
- * controller space. These registers are accessible through a regmap.
+ /* On PPv2.2 and PPv2.3, some port control registers are located into
+ * the system controller space. These registers are accessible
+ * through a regmap.
*/
struct regmap *sysctrl_base;
@@ -976,7 +1060,7 @@ struct mvpp2 {
u32 tclk;
/* HW version */
- enum { MVPP21, MVPP22 } hw_version;
+ enum { MVPP21, MVPP22, MVPP23 } hw_version;
/* Maximum number of RXQs per port */
unsigned int max_port_rxqs;
@@ -996,6 +1080,12 @@ struct mvpp2 {
/* page_pool allocator */
struct page_pool *page_pool[MVPP2_PORT_MAX_RXQ];
+
+ /* Global TX Flow Control config */
+ bool global_tx_fc;
+
+ /* Spinlocks for CM3 shared memory configuration */
+ spinlock_t mss_spinlock;
};
struct mvpp2_pcpu_stats {
@@ -1158,6 +1248,9 @@ struct mvpp2_port {
bool rx_hwtstamp;
enum hwtstamp_tx_types tx_hwtstamp_type;
struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
+
+ /* Firmware TX flow control */
+ bool tx_fc;
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -1220,7 +1313,7 @@ struct mvpp21_rx_desc {
__le32 reserved8;
};
-/* HW TX descriptor for PPv2.2 */
+/* HW TX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_tx_desc {
__le32 command;
u8 packet_offset;
@@ -1232,7 +1325,7 @@ struct mvpp22_tx_desc {
__le64 buf_cookie_misc;
};
-/* HW RX descriptor for PPv2.2 */
+/* HW RX descriptor for PPv2.2 and PPv2.3 */
struct mvpp22_rx_desc {
__le32 status;
__le16 reserved1;
@@ -1418,6 +1511,8 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
+
#ifdef CONFIG_MVPP2_PTP
int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
@@ -1450,4 +1545,5 @@ static inline bool mvpp22_rx_hwtstamping(struct mvpp2_port *port)
{
return IS_ENABLED(CONFIG_MVPP2_PTP) && port->rx_hwtstamp;
}
+
#endif
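The MSS_* additions above describe the CM3 shared-memory layout used by the flow-control firmware: one stop/start threshold register per RXQ, and one assignment byte per RXQ packed four to a 32-bit register (bits 1:0 port ID, bits 7:2 host ID). A worked sketch of how the macros resolve for a global queue number, illustrative only:

    static void example_mss_slots(int q, int fq, u32 *tresh_reg,
                                  u32 *ass_reg, u32 *ass_shift)
    {
            int idx = q + fq;                  /* global rx queue number */

            *tresh_reg = 0x200 + idx * 4;      /* MSS_RXQ_TRESH_REG(q, fq)  */
            *ass_reg   = 0x80 + (idx / 4) * 4; /* MSS_RXQ_ASS_REG(q, fq)    */
            *ass_shift = (idx % 4) * 8;        /* MSS_RXQ_ASS_Q_BASE(q, fq) */
    }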
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 8867f25afab4..663157dc8062 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -143,7 +143,7 @@ struct mvpp2_cls_c2_entry {
/* Number of per-port dedicated entries in the C2 TCAM */
#define MVPP22_CLS_C2_PORT_N_FLOWS MVPP2_N_RFS_ENTRIES_PER_FLOW
-/* Each port has oen range per flow type + one entry controling the global RSS
+/* Each port has one range per flow type + one entry controlling the global RSS
* setting and the default rx queue
*/
#define MVPP22_CLS_C2_PORT_RANGE (MVPP22_CLS_C2_PORT_N_FLOWS + 1)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 358119d98358..0507369bb54d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -91,6 +91,16 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
return cpu % priv->nthreads;
}
+static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->cm3_base + offset);
+}
+
+static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->cm3_base + offset);
+}
+
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
enum dma_data_direction dma_dir)
@@ -319,7 +329,7 @@ static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
unsigned int nrxqs;
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
return 1;
/* According to the PPv2.2 datasheet and our experiments on
@@ -384,7 +394,7 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
if (!IS_ALIGNED(size, 16))
return -EINVAL;
- /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
* bytes per buffer pointer
*/
if (priv->hw_version == MVPP21)
@@ -413,6 +423,19 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
val |= MVPP2_BM_START_MASK;
+
+ val &= ~MVPP2_BM_LOW_THRESH_MASK;
+ val &= ~MVPP2_BM_HIGH_THRESH_MASK;
+
+ /* Set 8 Pools BPPI threshold for MVPP23 */
+ if (priv->hw_version == MVPP23) {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
+ } else {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
+ }
+
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
bm_pool->size = size;
@@ -446,7 +469,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
u32 val;
u32 dma_addr_highbits, phys_addr_highbits;
@@ -581,6 +604,16 @@ err_unroll_pools:
return err;
}
+/* Enable the PPv23 8-pool mode */
+static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
+ val |= MVPP23_BM_8POOL_MODE;
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+}
+
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
@@ -634,6 +667,9 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
if (!priv->bm_pools)
return -ENOMEM;
+ if (priv->hw_version == MVPP23)
+ mvpp23_bm_set_8pool_mode(priv);
+
err = mvpp2_bm_pools_init(dev, priv);
if (err < 0)
return err;
@@ -731,6 +767,210 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
return data;
}
+/* Enable flow control for the port's Rx queues */
+static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, host_id, q;
+ int fq = port->first_rxq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the flow control enable bit to prevent a race between FW and
+ * kernel. If flow control was enabled, it will be re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Set same Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set stop and start Flow control RXQ thresholds */
+ val = MSS_THRESHOLD_START;
+ val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+ /* Set RXQ port ID */
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ /* Calculate the RXQ host ID:
+ * In single queue mode: the host ID equals the host ID used
+ * for the shared RX interrupt
+ * In multi queue mode: the host ID equals the RXQ ID divided
+ * by the number of CoS queues
+ * In single resource mode: the host ID is always 0
+ */
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ host_id = port->nqvecs;
+ else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
+ host_id = q;
+ else
+ host_id = 0;
+
+ /* Set RXQ host ID */
+ val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify firmware that the flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Disable flow control for the port's Rx queues */
+static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, q;
+ unsigned long flags;
+ int fq = port->first_rxq;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the flow control enable bit to prevent a race between FW and
+ * kernel. If flow control was enabled, it will be re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Disable Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set threshold 0 to disable Flow control */
+ val = 0;
+ val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify firmware that the flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Enable or disable flow control for a BM pool */
+static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *pool,
+ bool en)
+{
+ int val, cm3_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the flow control enable bit to prevent a race between FW and
+ * kernel. If flow control was enabled, it will be re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Check if the BM pool should be enabled or disabled */
+ if (en) {
+ /* Set BM pool start and stop thresholds per port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val |= MSS_BUF_POOL_PORT_OFFS(port->id);
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ val |= MSS_THRESHOLD_STOP;
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ } else {
+ /* Remove BM pool from the port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
+
+ /* Zero BM pool start and stop thresholds to disable pool
+ * flow control if pool empty (not used by any port)
+ */
+ if (!pool->buf_num) {
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ }
+
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ }
+
+ /* Notify firmware that the flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Enable or disable BM pool flow control on all ports */
+static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+{
+ struct mvpp2_port *port;
+ int i, j;
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (port->priv->percpu_pools) {
+ for (j = 0; j < port->nrxqs; j++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+ port->tx_fc & en);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
+ }
+ }
+}
+
+static int mvpp2_enable_global_fc(struct mvpp2 *priv)
+{
+ int val, timeout = 0;
+
+ /* Enable global flow control. At this stage global flow
+ * control is enabled, but still disabled per port.
+ */
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ /* Check if the firmware is running and disable FC if not */
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ while (timeout < MSS_FC_MAX_TIMEOUT) {
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+
+ if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
+ return 0;
+ usleep_range(10, 20);
+ timeout++;
+ }
+
+ priv->global_tx_fc = false;
+ return -EOPNOTSUPP;
+}
+
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
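With MSS_THRESHOLD_START = 1024 and MSS_THRESHOLD_STOP = 768 buffers, the per-RXQ word written by mvpp2_rxq_enable_fc() above works out as follows (worked example from the definitions in mvpp2.h):

    static u32 example_rxq_tresh_val(void)
    {
            u32 val = 1024;          /* MSS_THRESHOLD_START, bits 15:0  */

            val |= 768 << 16;        /* MSS_THRESHOLD_STOP,  bits 31:16 */
            return val;              /* 0x03000400 */
    }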
@@ -742,7 +982,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
if (test_bit(thread, &port->priv->lock_map))
spin_lock_irqsave(&port->bm_lock[thread], flags);
- if (port->priv->hw_version == MVPP22) {
+ if (port->priv->hw_version >= MVPP22) {
u32 val = 0;
if (sizeof(dma_addr_t) == 8)
@@ -1061,6 +1301,16 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
new_long_pool = MVPP2_BM_LONG;
if (new_long_pool != port->pool_long->id) {
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short,
+ false);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ false);
+ }
+
/* Remove port from old short & long pool */
port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
port->pool_long->pkt_size);
@@ -1078,6 +1328,25 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
mvpp2_swf_bm_pool_init(port);
mvpp2_set_hw_csum(port, new_long_pool);
+
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ true);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_short,
+ true);
+ }
+
+ /* Update L4 checksum when jumbo enable/disable on port */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
}
out_set:
@@ -1133,14 +1402,19 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
+ int cpu = smp_processor_id();
+ u32 thread;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (cpu >= port->priv->nthreads)
return;
- mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}
/* Unmask the current thread's Rx/Tx interrupts.
@@ -1150,20 +1424,25 @@ static void mvpp2_interrupts_mask(void *arg)
static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
- u32 val;
+ int cpu = smp_processor_id();
+ u32 val, thread;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (cpu >= port->priv->nthreads)
return;
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
val = MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
static void
@@ -1172,7 +1451,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
u32 val;
int i;
- if (port->priv->hw_version != MVPP22)
+ if (port->priv->hw_version == MVPP21)
return;
if (mask)
@@ -1188,6 +1467,9 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
mvpp2_thread_write(port->priv, v->sw_thread_id,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
}
@@ -1199,7 +1481,7 @@ static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
- return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
+ return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}
/* Port configuration routines */
@@ -1231,9 +1513,9 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII;
+ val |= GENCONF_CTRL0_PORT2_RGMII;
else if (port->gop_id == 3)
- val |= GENCONF_CTRL0_PORT1_RGMII_MII;
+ val |= GENCONF_CTRL0_PORT3_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
@@ -1250,9 +1532,9 @@ static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
if (port->gop_id > 1) {
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val &= ~GENCONF_CTRL0_PORT0_RGMII;
+ val &= ~GENCONF_CTRL0_PORT2_RGMII;
else if (port->gop_id == 3)
- val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
+ val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
}
@@ -1280,6 +1562,49 @@ static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
+static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 val;
+
+ val = readl(fca + MVPP22_FCA_CONTROL_REG);
+ val &= ~MVPP22_FCA_ENABLE_PERIODIC;
+ if (en)
+ val |= MVPP22_FCA_ENABLE_PERIODIC;
+ writel(val, fca + MVPP22_FCA_CONTROL_REG);
+}
+
+static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 lsb, msb;
+
+ lsb = timer & MVPP22_FCA_REG_MASK;
+ msb = timer >> MVPP22_FCA_REG_SIZE;
+
+ writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
+ writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
+}
+
+/* Set Flow Control timer x100 faster than pause quanta to ensure that link
+ * partner won't send traffic if port is in XOFF mode.
+ */
+static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
+{
+ u32 timer;
+
+ timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
+ * FC_QUANTA;
+
+ mvpp22_gop_fca_enable_periodic(port, false);
+
+ mvpp22_gop_fca_set_timer(port, timer);
+
+ mvpp22_gop_fca_enable_periodic(port, true);
+}
+
static int mvpp22_gop_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
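The periodic pause-frame timer programmed by mvpp22_gop_fca_set_periodic_timer() is sized so refreshes arrive roughly 100 times per pause-quanta interval, as the comment above notes. A worked example assuming a 333 MHz packet-processor clock; the tclk value is an assumption for illustration only:

    static void example_fca_timer(void)
    {
            u32 tclk = 333000000;    /* assumed priv->tclk */
            u32 timer, lsb, msb;

            /* tclk / (USEC_PER_SEC * FC_CLK_DIVIDER) * FC_QUANTA */
            timer = (tclk / (1000000 * 100)) * 0xFFFF; /* 3 * 65535 = 196605 */
            lsb = timer & 0xFFFF;    /* 0xFFFD -> MVPP22_PERIODIC_COUNTER_LSB_REG */
            msb = timer >> 16;       /* 0x2    -> MVPP22_PERIODIC_COUNTER_MSB_REG */
            (void)lsb;
            (void)msb;
    }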
@@ -1324,6 +1649,8 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
val |= GENCONF_SOFT_RESET1_GOP;
regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+ mvpp22_gop_fca_set_periodic_timer(port);
+
unsupported_conf:
return 0;
@@ -1817,7 +2144,7 @@ static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
@@ -1830,7 +2157,7 @@ static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
void __iomem *mpcs, *xpcs;
u32 val;
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
return;
mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
@@ -1851,7 +2178,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
void __iomem *mpcs, *xpcs;
u32 val;
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
return;
mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
@@ -2287,7 +2614,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
int queue;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (smp_processor_id() >= port->priv->nthreads)
return;
for (queue = 0; queue < port->ntxqs; queue++) {
@@ -2348,6 +2675,20 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
}
}
+/* Set the number of non-occupied descriptors threshold */
+static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
+ val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
+ val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+}
+
/* Set the number of packets that will be received before Rx interrupt
* will be generated by HW.
*/
@@ -2611,6 +2952,9 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rx_pkts_coal_set(port, rxq);
mvpp2_rx_time_coal_set(port, rxq);
+ /* Set the number of non occupied descriptors threshold */
+ mvpp2_set_rxq_free_tresh(port, rxq);
+
/* Add number of descriptors ready for receiving packets */
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -2928,6 +3272,9 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
for (queue = 0; queue < port->nrxqs; queue++)
mvpp2_rxq_deinit(port, port->rxqs[queue]);
+
+ if (port->tx_fc)
+ mvpp2_rxq_disable_fc(port);
}
/* Init all Rx queues for port */
@@ -2940,6 +3287,10 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port)
if (err)
goto err_cleanup;
}
+
+ if (port->tx_fc)
+ mvpp2_rxq_enable_fc(port);
+
return 0;
err_cleanup:
@@ -3563,17 +3914,17 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
frag_size = bm_pool->frag_size;
if (xdp_prog) {
- xdp.data_hard_start = data;
- xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
- xdp.data_end = xdp.data + rx_bytes;
- xdp.frame_sz = PAGE_SIZE;
+ struct xdp_rxq_info *xdp_rxq;
if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
- xdp.rxq = &rxq->xdp_rxq_short;
+ xdp_rxq = &rxq->xdp_rxq_short;
else
- xdp.rxq = &rxq->xdp_rxq_long;
+ xdp_rxq = &rxq->xdp_rxq_long;
- xdp_set_data_meta_invalid(&xdp);
+ xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
+ xdp_prepare_buff(&xdp, data,
+ MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+ rx_bytes, false);
ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
@@ -3684,7 +4035,7 @@ static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
struct mvpp2_tx_desc *desc)
{
/* We only need to clear the low bits */
- if (port->priv->hw_version != MVPP21)
+ if (port->priv->hw_version >= MVPP22)
desc->pp22.ptp_descriptor &=
cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
}
@@ -4196,7 +4547,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
/* Enable interrupts on all threads */
mvpp2_interrupts_enable(port);
- if (port->priv->hw_version == MVPP22)
+ if (port->priv->hw_version >= MVPP22)
mvpp22_mode_reconfigure(port);
if (port->phylink) {
@@ -4239,6 +4590,8 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
new_rx_pending = MVPP2_MAX_RXD_MAX;
+ else if (ring->rx_pending < MSS_THRESHOLD_START)
+ new_rx_pending = MSS_THRESHOLD_START;
else if (!IS_ALIGNED(ring->rx_pending, 16))
new_rx_pending = ALIGN(ring->rx_pending, 16);
@@ -4412,7 +4765,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->port_irq) {
+ if (priv->hw_version >= MVPP22 && port->port_irq) {
err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
@@ -4579,6 +4932,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
*/
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{
+ bool change_percpu = (percpu != priv->percpu_pools);
int numbufs = MVPP2_BM_POOLS_NUM, i;
struct mvpp2_port *port = NULL;
bool status[MVPP2_MAX_PORTS];
@@ -4594,6 +4948,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
if (priv->percpu_pools)
numbufs = port->nrxqs * 2;
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, false);
+
for (i = 0; i < numbufs; i++)
mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
@@ -4608,6 +4965,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
mvpp2_open(port->dev);
}
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, true);
+
return 0;
}
@@ -5464,7 +5824,7 @@ static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
return;
}
- /* Handle the more complicated PPv2.2 case */
+ /* Handle the more complicated PPv2.2 and PPv2.3 case */
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
@@ -5641,7 +6001,7 @@ static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
/* Checks if the port dt description has the required Tx interrupts:
* - PPv2.1: there are no such interrupts.
- * - PPv2.2:
+ * - PPv2.2 and PPv2.3:
* - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
* - The new ones have: "hifX" with X in [0..8]
*
@@ -5883,6 +6243,11 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
+ if (port->priv->global_tx_fc) {
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ }
+
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
@@ -5973,7 +6338,7 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -6060,7 +6425,7 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
MVPP2_GMAC_PORT_RESET_MASK,
MVPP2_GMAC_PORT_RESET_MASK);
- if (port->priv->hw_version == MVPP22) {
+ if (port->priv->hw_version >= MVPP22) {
mvpp22_gop_mask_irq(port);
phy_power_off(port->comphy);
@@ -6114,7 +6479,7 @@ static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- if (port->priv->hw_version == MVPP22 &&
+ if (port->priv->hw_version >= MVPP22 &&
port->phy_interface != interface) {
port->phy_interface = interface;
@@ -6162,6 +6527,7 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
u32 val;
+ int i;
if (mvpp2_is_xlg(interface)) {
if (!phylink_autoneg_inband(mode)) {
@@ -6212,6 +6578,23 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
val);
}
+ if (port->priv->global_tx_fc) {
+ port->tx_fc = tx_pause;
+ if (tx_pause)
+ mvpp2_rxq_enable_fc(port);
+ else
+ mvpp2_rxq_disable_fc(port);
+ if (port->priv->percpu_pools) {
+ for (i = 0; i < port->nrxqs; i++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
+ }
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
+ }
+
mvpp2_port_enable(port);
mvpp2_egress_enable(port);
@@ -6629,7 +7012,7 @@ static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
}
-/* Initialize TX FIFO's: the total FIFO size is 48kB on PPv2.2.
+/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
* 4kB fixed space must be assigned for the loopback port.
* Redistribute remaining avialable 44kB space among all active ports.
* Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G
@@ -6678,6 +7061,55 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
+/* Configure Rx FIFO Flow control thresholds */
+static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
+{
+ int port, val;
+
+ /* Port 0: maximum speed -10Gb/s port
+ * required by spec RX FIFO threshold 9KB
+ * Port 1: maximum speed -5Gb/s port
+ * required by spec RX FIFO threshold 4KB
+ * Port 2: maximum speed -1Gb/s port
+ * required by spec RX FIFO threshold 2KB
+ */
+
+ /* Without loopback port */
+ for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
+ if (port == 0) {
+ val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else if (port == 1) {
+ val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else {
+ val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ }
+ }
+}
+
+/* Enable or disable Rx FIFO Flow control for a port */
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
+
+ if (en)
+ val |= MVPP2_RX_FC_EN;
+ else
+ val &= ~MVPP2_RX_FC_EN;
+
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+}
+
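
As a side note on the encoding used above, here is a minimal user-space sketch of the same scale/shift/mask computation. The unit, offset and mask values below are assumptions standing in for the MVPP2_RX_FC_TRSH_* definitions, which live in the driver header and are not part of this hunk:

#include <stdio.h>

/* Assumed stand-ins for MVPP2_RX_FC_TRSH_UNIT/OFFS/MASK (illustration only) */
#define RX_FC_TRSH_UNIT   256
#define RX_FC_TRSH_OFFS   16
#define RX_FC_TRSH_MASK   (0x3ffU << RX_FC_TRSH_OFFS)

/* Encode a byte threshold the way mvpp23_rx_fifo_fc_set_tresh() does:
 * scale to hardware units, shift into the field, then apply the mask.
 */
static unsigned int rx_fc_thresh_encode(unsigned int thresh_bytes)
{
	return ((thresh_bytes / RX_FC_TRSH_UNIT) << RX_FC_TRSH_OFFS) & RX_FC_TRSH_MASK;
}

int main(void)
{
	printf("9KB threshold -> 0x%x\n", rx_fc_thresh_encode(9 * 1024));
	return 0;
}
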
static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
{
int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
@@ -6686,9 +7118,9 @@ static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
}
-/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2.
- * 3kB fixed space must be assigned for the loopback port.
- * Redistribute the remaining available 16kB space among all active ports.
+/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
+ * 1kB fixed space must be assigned for the loopback port.
+ * Redistribute the remaining available 18kB space among all active ports.
* The 10G interface should use 10kB (which is maximum possible size
* per single port).
*/
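
To make the redistribution described in the comment concrete, here is a small user-space sketch of one split that is consistent with it (the 10G port takes the 10kB cap, the remaining ports share what is left). The helper and constants are illustrative assumptions, not the driver's exact algorithm:

#include <stdio.h>

#define TX_FIFO_TOTAL_KB     19
#define TX_FIFO_LOOPBACK_KB  1
#define TX_FIFO_PORT_MAX_KB  10	/* maximum possible size per single port */

/* Illustrative split of the remaining 18kB among 'nports' active ports */
static void tx_fifo_split(int nports)
{
	int remainder = TX_FIFO_TOTAL_KB - TX_FIFO_LOOPBACK_KB;	/* 18kB */
	int i, size;

	for (i = 0; i < nports; i++) {
		if (i == 0)		/* 10G port gets the per-port maximum */
			size = TX_FIFO_PORT_MAX_KB;
		else			/* others share what is left */
			size = remainder / (nports - i);
		remainder -= size;
		printf("port %d: %dkB\n", i, size);
	}
}

int main(void)
{
	tx_fifo_split(3);	/* e.g. 10kB + 4kB + 4kB */
	return 0;
}
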
@@ -6699,9 +7131,9 @@ static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
int size_remainder;
int port, size;
- /* The loopback requires fixed 3kB of the FIFO space assignment. */
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
- MVPP22_TX_FIFO_DATA_SIZE_3KB);
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
/* Set TX FIFO size to 0 for inactive ports. */
@@ -6709,7 +7141,7 @@ static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
mvpp22_tx_fifo_set_hw(priv, port, 0);
/* Assign remaining TX FIFO space among all active ports. */
- size_remainder = MVPP22_TX_FIFO_DATA_SIZE_16KB;
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
remaining_ports_count = hweight_long(port_map);
for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
@@ -6794,7 +7226,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
mvpp2_axi_init(priv);
/* Disable HW PHY polling */
@@ -6829,6 +7261,8 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
} else {
mvpp22_rx_fifo_init(priv);
mvpp22_tx_fifo_init(priv);
+ if (priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_set_tresh(priv);
}
if (priv->hw_version == MVPP21)
@@ -6854,6 +7288,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
return 0;
}
+static int mvpp2_get_sram(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ if (has_acpi_companion(&pdev->dev))
+ dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
+ else
+ dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
+ return 0;
+ }
+
+ priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+
+ return PTR_ERR_OR_ZERO(priv->cm3_base);
+}
+
static int mvpp2_probe(struct platform_device *pdev)
{
const struct acpi_device_id *acpi_id;
@@ -6910,9 +7363,18 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+
+ /* Map CM3 SRAM */
+ err = mvpp2_get_sram(pdev, priv);
+ if (err)
+ dev_warn(&pdev->dev, "Fail to alloc CM3 SRAM\n");
+
+ /* Enable global Flow Control only if the SRAM handle is not NULL */
+ if (priv->cm3_base)
+ priv->global_tx_fc = true;
}
- if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
+ if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -6925,7 +7387,7 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}
- if (priv->hw_version == MVPP22 &&
+ if (priv->hw_version >= MVPP22 &&
mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
priv->percpu_pools = 1;
@@ -6970,7 +7432,7 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
if (IS_ERR(priv->mg_clk)) {
err = PTR_ERR(priv->mg_clk);
@@ -7011,7 +7473,7 @@ static int mvpp2_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
goto err_axi_clk;
@@ -7031,6 +7493,12 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->port_map |= BIT(i);
}
+ if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
+ priv->hw_version = MVPP23;
+
+ /* Init mss lock */
+ spin_lock_init(&priv->mss_spinlock);
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
@@ -7070,6 +7538,12 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
+ err = mvpp2_enable_global_fc(priv);
+ if (err)
+ dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
+ }
+
mvpp2_dbgfs_init(priv, pdev->name);
platform_set_drvdata(pdev, priv);
@@ -7086,10 +7560,10 @@ err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index dd590086fe6a..4812cdb4609e 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -914,15 +914,15 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = tid;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
@@ -931,7 +931,8 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
MVPP2_PRS_TCAM_PROTO_MASK);
mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -999,12 +1000,17 @@ static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
return -EINVAL;
}
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1166,6 +1172,21 @@ static void mvpp2_prs_mh_init(struct mvpp2 *priv)
/* Update shadow table and hw entry */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set an MH entry that skips the parser */
+ pe.index = MVPP2_PE_MH_SKIP_PRS;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+ mvpp2_prs_hw_write(priv, &pe);
}
/* Set default entries (placeholder) for promiscuous, non-promiscuous and
@@ -1425,8 +1446,9 @@ static int mvpp2_prs_etype_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1630,8 +1652,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IP header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* goto ipv4 dest-address (skip eth_type + IP-header-size - 4) */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN +
+ sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
@@ -1761,19 +1784,20 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_PROTO_UN;
- /* Set next lu to IPv4 */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
- mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* Finished: go to flowid generation */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
/* Set L4 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
sizeof(struct iphdr) - 4,
MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
- mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
MVPP2_PRS_RI_L4_PROTO_MASK);
- mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
@@ -1786,14 +1810,19 @@ static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
pe.index = MVPP2_PE_IP4_ADDR_UN;
- /* Finished: go to flowid generation */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
- mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ /* Go again to ipv4 */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+
+ mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+ MVPP2_PRS_IPV4_DIP_AI_BIT);
+
+ /* Shift back to IPv4 proto */
+ mvpp2_prs_sram_shift_set(&pe, -12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
MVPP2_PRS_RI_L3_ADDR_MASK);
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
- mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
- MVPP2_PRS_IPV4_DIP_AI_BIT);
/* Unmask all ports */
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index 4b68dd374733..c16e5b9947bd 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -103,10 +103,11 @@
#define MVPP2_PE_MAC_RANGE_START (MVPP2_PE_MAC_RANGE_END - \
MVPP2_PRS_MAC_RANGE_SIZE + 1)
/* VLAN filtering range */
-#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 32)
#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_MAC_RANGE_START - 1)
+#define MVPP2_PE_MH_SKIP_PRS (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index eb535c98ca38..1a3455620b38 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -4,10 +4,10 @@
#
ccflags-y += -I$(src)
-obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
-obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
+obj-$(CONFIG_OCTEONTX2_MBOX) += rvu_mbox.o
+obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
-octeontx2_mbox-y := mbox.o rvu_trace.o
-octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
+rvu_mbox-y := mbox.o rvu_trace.o
+rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
- rvu_cpt.o rvu_devlink.o
+ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 1a8f5a039d50..9caa375d01b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -14,53 +14,18 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "cgx.h"
+#include "rvu.h"
+#include "lmac_common.h"
-#define DRV_NAME "octeontx2-cgx"
-#define DRV_STRING "Marvell OcteonTX2 CGX/MAC Driver"
-
-/**
- * struct lmac
- * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
- * @cmd_lock: Lock to serialize the command interface
- * @resp: command response
- * @link_info: link related information
- * @event_cb: callback for linkchange events
- * @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
- * @cgx: parent cgx port
- * @lmac_id: lmac port id
- * @name: lmac port name
- */
-struct lmac {
- wait_queue_head_t wq_cmd_cmplt;
- struct mutex cmd_lock;
- u64 resp;
- struct cgx_link_user_info link_info;
- struct cgx_event_cb event_cb;
- spinlock_t event_cb_lock;
- bool cmd_pend;
- struct cgx *cgx;
- u8 lmac_id;
- char *name;
-};
-
-struct cgx {
- void __iomem *reg_base;
- struct pci_dev *pdev;
- u8 cgx_id;
- u8 lmac_count;
- struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
- struct work_struct cgx_cmd_work;
- struct workqueue_struct *cgx_cmd_workq;
- struct list_head cgx_list;
-};
+#define DRV_NAME "Marvell-CGX/RPM"
+#define DRV_STRING "Marvell CGX/RPM Driver"
static LIST_HEAD(cgx_list);
@@ -76,22 +41,45 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);
/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
{ 0, } /* end of table */
};
MODULE_DEVICE_TABLE(pci, cgx_id_table);
-static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+static bool is_dev_rpm(void *cgxd)
{
- writeq(val, cgx->reg_base + (lmac << 18) + offset);
+ struct cgx *cgx = cgxd;
+
+ return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
+}
+
+bool is_lmac_valid(struct cgx *cgx, int lmac_id)
+{
+ return cgx && test_bit(lmac_id, &cgx->lmac_bmap);
}
-static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+struct mac_ops *get_mac_ops(void *cgxd)
{
- return readq(cgx->reg_base + (lmac << 18) + offset);
+ if (!cgxd)
+ return cgxd;
+
+ return ((struct cgx *)cgxd)->mac_ops;
}
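
A short sketch of how a caller might use this accessor to stay MAC-type agnostic; the wrapper itself is hypothetical, while get_mac_ops() and the get_nr_lmacs op (see struct mac_ops in lmac_common.h below) come from this patch:

/* Hypothetical helper: query the LMAC count without caring whether the
 * underlying device is CGX or RPM.
 */
static int example_nr_lmacs(void *cgxd)
{
	struct mac_ops *mac_ops = get_mac_ops(cgxd);

	if (!mac_ops)
		return -ENODEV;

	return mac_ops->get_nr_lmacs(cgxd);
}
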
-static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
+{
+ writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
+}
+
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
+{
+ return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
+ offset);
+}
+
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
return NULL;
@@ -135,6 +123,20 @@ void *cgx_get_pdata(int cgx_id)
return NULL;
}
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ cgx_write(cgx_dev, lmac_id, offset, val);
+}
+
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+
+ return cgx_read(cgx_dev, lmac_id, offset);
+}
+
int cgx_get_cgxid(void *cgxd)
{
struct cgx *cgx = cgxd;
@@ -185,8 +187,10 @@ static u64 mac2u64 (u8 *mac_addr)
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
u64 cfg;
+ mac_ops = cgx_dev->mac_ops;
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
@@ -205,8 +209,11 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
u64 cfg;
+ mac_ops = cgx_dev->mac_ops;
+
cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -215,15 +222,16 @@ int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
return 0;
}
-static inline u8 cgx_get_lmac_type(struct cgx *cgx, int lmac_id)
+static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
+ struct cgx *cgx = cgxd;
u64 cfg;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -237,10 +245,10 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
u8 lmac_type;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
- lmac_type = cgx_get_lmac_type(cgx, lmac_id);
+ lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
if (enable)
@@ -262,11 +270,13 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
u64 cfg = 0;
if (!cgx)
return;
+ mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
@@ -324,7 +334,7 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
return 0;
@@ -334,18 +344,77 @@ int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
struct cgx *cgx = cgxd;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
return 0;
}
+u64 cgx_features_get(void *cgxd)
+{
+ return ((struct cgx *)cgxd)->hw_features;
+}
+
+static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
+{
+ if (!linfo->fec)
+ return 0;
+
+ switch (linfo->lmac_type_id) {
+ case LMAC_MODE_SGMII:
+ case LMAC_MODE_XAUI:
+ case LMAC_MODE_RXAUI:
+ case LMAC_MODE_QSGMII:
+ return 0;
+ case LMAC_MODE_10G_R:
+ case LMAC_MODE_25G_R:
+ case LMAC_MODE_100G_R:
+ case LMAC_MODE_USXGMII:
+ return 1;
+ case LMAC_MODE_40G_R:
+ return 4;
+ case LMAC_MODE_50G_R:
+ if (linfo->fec == OTX2_FEC_BASER)
+ return 2;
+ else
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
+{
+ int stats, fec_stats_count = 0;
+ int corr_reg, uncorr_reg;
+ struct cgx *cgx = cgxd;
+
+ if (!cgx || lmac_id >= cgx->lmac_count)
+ return -ENODEV;
+ fec_stats_count =
+ cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
+ if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
+ corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
+ uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
+ } else {
+ corr_reg = CGXX_SPUX_RSFEC_CORR;
+ uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
+ }
+ for (stats = 0; stats < fec_stats_count; stats++) {
+ rsp->fec_corr_blks +=
+ cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
+ rsp->fec_uncorr_blks +=
+ cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
+ }
+ return 0;
+}
+
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -362,7 +431,7 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
struct cgx *cgx = cgxd;
u64 cfg, last;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
@@ -377,13 +446,16 @@ int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
return !!(last & DATA_PKT_TX_EN);
}
-int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
- u8 *tx_pause, u8 *rx_pause)
+static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
@@ -394,13 +466,16 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
return 0;
}
-int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
- u8 tx_pause, u8 rx_pause)
+static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
+ u8 tx_pause, u8 rx_pause)
{
struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (is_dev_rpm(cgx))
+ return 0;
+
+ if (!is_lmac_valid(cgx, lmac_id))
return -ENODEV;
cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
@@ -424,11 +499,12 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
return 0;
}
-static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
+static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
+ struct cgx *cgx = cgxd;
u64 cfg;
- if (!cgx || lmac_id >= cgx->lmac_count)
+ if (!is_lmac_valid(cgx, lmac_id))
return;
if (enable) {
/* Enable receive pause frames */
@@ -486,6 +562,9 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
if (!cgx)
return;
+ if (is_dev_rpm(cgx))
+ return;
+
if (enable) {
/* Enable inbound PTP timestamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
@@ -508,7 +587,7 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
}
/* CGX Firmware interface low level support */
-static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
struct cgx *cgx = lmac->cgx;
struct device *dev;
@@ -556,8 +635,7 @@ unlock:
return err;
}
-static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
- struct cgx *cgx, int lmac_id)
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
struct lmac *lmac;
int err;
@@ -592,6 +670,7 @@ static inline void cgx_link_usertable_init(void)
cgx_speed_mbps[CGX_LINK_25G] = 25000;
cgx_speed_mbps[CGX_LINK_40G] = 40000;
cgx_speed_mbps[CGX_LINK_50G] = 50000;
+ cgx_speed_mbps[CGX_LINK_80G] = 80000;
cgx_speed_mbps[CGX_LINK_100G] = 100000;
cgx_lmactype_string[LMAC_MODE_SGMII] = "SGMII";
@@ -606,6 +685,143 @@ static inline void cgx_link_usertable_init(void)
cgx_lmactype_string[LMAC_MODE_USXGMII] = "USXGMII";
}
+static int cgx_link_usertable_index_map(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return CGX_LINK_10M;
+ case SPEED_100:
+ return CGX_LINK_100M;
+ case SPEED_1000:
+ return CGX_LINK_1G;
+ case SPEED_2500:
+ return CGX_LINK_2HG;
+ case SPEED_5000:
+ return CGX_LINK_5G;
+ case SPEED_10000:
+ return CGX_LINK_10G;
+ case SPEED_20000:
+ return CGX_LINK_20G;
+ case SPEED_25000:
+ return CGX_LINK_25G;
+ case SPEED_40000:
+ return CGX_LINK_40G;
+ case SPEED_50000:
+ return CGX_LINK_50G;
+ case 80000:
+ return CGX_LINK_80G;
+ case SPEED_100000:
+ return CGX_LINK_100G;
+ case SPEED_UNKNOWN:
+ return CGX_LINK_NONE;
+ }
+ return CGX_LINK_NONE;
+}
+
+static void set_mod_args(struct cgx_set_link_mode_args *args,
+ u32 speed, u8 duplex, u8 autoneg, u64 mode)
+{
+ /* Fill in default values in case the user did not pass
+ * valid parameters
+ */
+ if (args->duplex == DUPLEX_UNKNOWN)
+ args->duplex = duplex;
+ if (args->speed == SPEED_UNKNOWN)
+ args->speed = speed;
+ if (args->an == AUTONEG_UNKNOWN)
+ args->an = autoneg;
+ args->mode = mode;
+ args->ports = 0;
+}
+
+static void otx2_map_ethtool_link_modes(u64 bitmask,
+ struct cgx_set_link_mode_args *args)
+{
+ switch (bitmask) {
+ case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
+ set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
+ set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
+ set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
+ set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
+ set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
+ break;
+ case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
+ set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
+ set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
+ set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
+ set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
+ set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
+ set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
+ set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
+ set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
+ break;
+ case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
+ set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
+ set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
+ break;
+ case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
+ set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
+ break;
+ default:
+ set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
+ break;
+ }
+}
+
static inline void link_status_user_format(u64 lstat,
struct cgx_link_user_info *linfo,
struct cgx *cgx, u8 lmac_id)
@@ -615,6 +831,8 @@ static inline void link_status_user_format(u64 lstat,
linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
+ linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
+ linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
@@ -642,6 +860,9 @@ static inline void cgx_link_change_handler(u64 lstat,
lmac->link_info = event.link_uinfo;
linfo = &lmac->link_info;
+ if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
+ return;
+
/* Ensure callback doesn't get unregistered until we finish it */
spin_lock(&lmac->event_cb_lock);
@@ -670,7 +891,8 @@ static inline bool cgx_cmdresp_is_linkevent(u64 event)
id = FIELD_GET(EVTREG_ID, event);
if (id == CGX_CMD_LINK_BRING_UP ||
- id == CGX_CMD_LINK_BRING_DOWN)
+ id == CGX_CMD_LINK_BRING_DOWN ||
+ id == CGX_CMD_MODE_CHANGE)
return true;
else
return false;
@@ -686,12 +908,16 @@ static inline bool cgx_event_is_linkevent(u64 event)
static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
+ u64 event, offset, clear_bit;
struct lmac *lmac = data;
struct cgx *cgx;
- u64 event;
cgx = lmac->cgx;
+ /* Clear SW_INT for RPM and CMR_INT for CGX */
+ offset = cgx->mac_ops->int_register;
+ clear_bit = cgx->mac_ops->int_ena_bit;
+
event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);
if (!FIELD_GET(EVTREG_ACK, event))
@@ -727,7 +953,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
* Ack the interrupt register as well.
*/
cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
- cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);
+ cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);
return IRQ_HANDLED;
}
@@ -771,20 +997,79 @@ int cgx_get_fwdata_base(u64 *base)
{
u64 req = 0, resp;
struct cgx *cgx;
+ int first_lmac;
int err;
cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
if (!cgx)
return -ENXIO;
+ first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
- err = cgx_fwi_cmd_generic(req, &resp, cgx, 0);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
if (!err)
*base = FIELD_GET(RESP_FWD_BASE, resp);
return err;
}
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ if (args.mode)
+ otx2_map_ethtool_link_modes(args.mode, &args);
+ if (!args.speed && args.duplex && !args.an)
+ return -EINVAL;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
+ req = FIELD_SET(CMDMODECHANGE_SPEED,
+ cgx_link_usertable_index_map(args.speed), req);
+ req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
+ req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
+ req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
+ req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);
+
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
+{
+ u64 req = 0, resp;
+ struct cgx *cgx;
+ int err = 0;
+
+ cgx = cgx_get_pdata(cgx_id);
+ if (!cgx)
+ return -ENXIO;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
+ req = FIELD_SET(CMDSETFEC, fec, req);
+ err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+ if (err)
+ return err;
+
+ cgx->lmac_idmap[lmac_id]->link_info.fec =
+ FIELD_GET(RESP_LINKSTAT_FEC, resp);
+ return cgx->lmac_idmap[lmac_id]->link_info.fec;
+}
+
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ u64 req = 0, resp;
+
+ if (!cgx)
+ return -ENODEV;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
+ return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
+}
+
static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
u64 req = 0;
@@ -800,10 +1085,11 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
+ int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
u64 req = 0;
req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
- return cgx_fwi_cmd_generic(req, resp, cgx, 0);
+ return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}
static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
@@ -836,8 +1122,8 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
struct device *dev = &cgx->pdev->dev;
int i, err;
- /* Do Link up for all the lmacs */
- for (i = 0; i < cgx->lmac_count; i++) {
+ /* Do Link up for all the enabled lmacs */
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_fwi_link_change(cgx, i, true);
if (err)
dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -857,17 +1143,82 @@ int cgx_lmac_linkup_start(void *cgxd)
return 0;
}
+static void cgx_lmac_get_fifolen(struct cgx *cgx)
+{
+ u64 cfg;
+
+ cfg = cgx_read(cgx, 0, CGX_CONST);
+ cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+}
+
+static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
+ int cnt, bool req_free)
+{
+ struct mac_ops *mac_ops = cgx->mac_ops;
+ u64 offset, ena_bit;
+ unsigned int irq;
+ int err;
+
+ irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
+ cnt * mac_ops->irq_offset);
+ offset = mac_ops->int_set_reg;
+ ena_bit = mac_ops->int_ena_bit;
+
+ if (req_free) {
+ free_irq(irq, lmac);
+ return 0;
+ }
+
+ err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
+ if (err)
+ return err;
+
+ /* Enable interrupt */
+ cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
+ return 0;
+}
+
+int cgx_get_nr_lmacs(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
+}
+
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_idmap[lmac_index]->lmac_id;
+}
+
+unsigned long cgx_get_lmac_bmap(void *cgxd)
+{
+ struct cgx *cgx = cgxd;
+
+ return cgx->lmac_bmap;
+}
+
static int cgx_lmac_init(struct cgx *cgx)
{
struct lmac *lmac;
+ u64 lmac_list;
int i, err;
- cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
+ cgx_lmac_get_fifolen(cgx);
+
+ cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
+ /* lmac_list specifies which lmacs are enabled
+ * when bit n is set to 1, LMAC[n] is enabled
+ */
+ if (cgx->mac_ops->non_contiguous_serdes_lane)
+ lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
+
if (cgx->lmac_count > MAX_LMAC_PER_CGX)
cgx->lmac_count = MAX_LMAC_PER_CGX;
for (i = 0; i < cgx->lmac_count; i++) {
- lmac = kcalloc(1, sizeof(struct lmac), GFP_KERNEL);
+ lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
@@ -876,24 +1227,25 @@ static int cgx_lmac_init(struct cgx *cgx)
goto err_lmac_free;
}
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
- lmac->lmac_id = i;
+ if (cgx->mac_ops->non_contiguous_serdes_lane) {
+ lmac->lmac_id = __ffs64(lmac_list);
+ lmac_list &= ~BIT_ULL(lmac->lmac_id);
+ } else {
+ lmac->lmac_id = i;
+ }
+
lmac->cgx = cgx;
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
- err = request_irq(pci_irq_vector(cgx->pdev,
- CGX_LMAC_FWI + i * 9),
- cgx_fwi_event_handler, 0, lmac->name, lmac);
+ err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
if (err)
goto err_irq;
- /* Enable interrupt */
- cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
- FW_CGX_INT);
-
/* Add reference */
- cgx->lmac_idmap[i] = lmac;
- cgx_lmac_pause_frm_config(cgx, i, true);
+ cgx->lmac_idmap[lmac->lmac_id] = lmac;
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
+ set_bit(lmac->lmac_id, &cgx->lmac_bmap);
}
return cgx_lmac_verify_fwi_version(cgx);
@@ -917,12 +1269,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
}
/* Free all lmac related resources */
- for (i = 0; i < cgx->lmac_count; i++) {
- cgx_lmac_pause_frm_config(cgx, i, false);
+ for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
lmac = cgx->lmac_idmap[i];
if (!lmac)
continue;
- free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
+ cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
+ cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
kfree(lmac->name);
kfree(lmac);
}
@@ -930,6 +1282,37 @@ static int cgx_lmac_exit(struct cgx *cgx)
return 0;
}
+static void cgx_populate_features(struct cgx *cgx)
+{
+ if (is_dev_rpm(cgx))
+ cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
+ else
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+}
+
+static struct mac_ops cgx_mac_ops = {
+ .name = "cgx",
+ .csr_offset = 0,
+ .lmac_offset = 18,
+ .int_register = CGXX_CMRX_INT,
+ .int_set_reg = CGXX_CMRX_INT_ENA_W1S,
+ .irq_offset = 9,
+ .int_ena_bit = FW_CGX_INT,
+ .lmac_fwi = CGX_LMAC_FWI,
+ .non_contiguous_serdes_lane = false,
+ .rx_stats_cnt = 9,
+ .tx_stats_cnt = 18,
+ .get_nr_lmacs = cgx_get_nr_lmacs,
+ .get_lmac_type = cgx_get_lmac_type,
+ .mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
+ .mac_get_rx_stats = cgx_get_rx_stats,
+ .mac_get_tx_stats = cgx_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = cgx_lmac_pause_frm_config,
+};
+
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -943,6 +1326,12 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_drvdata(pdev, cgx);
+ /* Use mac_ops to get MAC specific features */
+ if (pdev->device == PCI_DEVID_CN10K_RPM)
+ cgx->mac_ops = rpm_get_mac_ops();
+ else
+ cgx->mac_ops = &cgx_mac_ops;
+
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
@@ -964,7 +1353,7 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_release_regions;
}
- nvec = CGX_NVEC;
+ nvec = pci_msix_vec_count(cgx->pdev);
err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
if (err < 0 || err != nvec) {
dev_err(dev, "Request for %d msix vectors failed, err %d\n",
@@ -988,6 +1377,10 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
cgx_link_usertable_init();
+ cgx_populate_features(cgx);
+
+ mutex_init(&cgx->lock);
+
err = cgx_lmac_init(cgx);
if (err)
goto err_release_lmac;
@@ -1011,8 +1404,10 @@ static void cgx_remove(struct pci_dev *pdev)
{
struct cgx *cgx = pci_get_drvdata(pdev);
- cgx_lmac_exit(cgx);
- list_del(&cgx->cgx_list);
+ if (cgx) {
+ cgx_lmac_exit(cgx);
+ list_del(&cgx->cgx_list);
+ }
pci_free_irq_vectors(pdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index bcfc3e5f66bb..12521262164a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -13,6 +13,7 @@
#include "mbox.h"
#include "cgx_fw_if.h"
+#include "rpm.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_CGX 0xA059
@@ -42,12 +43,12 @@
#define CGXX_CMRX_RX_ID_MAP 0x060
#define CGXX_CMRX_RX_STAT0 0x070
#define CGXX_CMRX_RX_LMACS 0x128
-#define CGXX_CMRX_RX_DMAC_CTL0 0x1F8
+#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
-#define CGXX_CMRX_RX_DMAC_CAM0 0x200
+#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
@@ -55,7 +56,13 @@
#define CGXX_SCRATCH0_REG 0x1050
#define CGXX_SCRATCH1_REG 0x1058
#define CGX_CONST 0x2000
+#define CGX_CONST_RXFIFO_SIZE GENMASK_ULL(23, 0)
#define CGXX_SPUX_CONTROL1 0x10000
+#define CGXX_SPUX_LNX_FEC_CORR_BLOCKS 0x10700
+#define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
+#define CGXX_SPUX_RSFEC_CORR 0x10088
+#define CGXX_SPUX_RSFEC_UNCORR 0x10090
+
#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14)
#define CGXX_GMP_PCS_MRX_CTL 0x30000
#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14)
@@ -81,7 +88,6 @@
#define CGX_CMD_TIMEOUT 2200 /* msecs */
#define DEFAULT_PAUSE_TIME 0x7FF
-#define CGX_NVEC 37
#define CGX_LMAC_FWI 0
enum cgx_nix_stat_type {
@@ -147,5 +153,16 @@ int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id);
-
+int cgx_set_fec(u64 fec, int cgx_id, int lmac_id);
+int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
+int cgx_get_phy_fec_stats(void *cgxd, int lmac_id);
+int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
+ int cgx_id, int lmac_id);
+u64 cgx_features_get(void *cgxd);
+struct mac_ops *get_mac_ops(void *cgxd);
+int cgx_get_nr_lmacs(void *cgxd);
+u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
+unsigned long cgx_get_lmac_bmap(void *cgxd);
+void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
+u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
index c3702fa58b6b..aa4e42f78f13 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h
@@ -43,7 +43,13 @@ enum cgx_error_type {
CGX_ERR_TRAINING_FAIL,
CGX_ERR_RX_EQU_FAIL,
CGX_ERR_SPUX_BER_FAIL,
- CGX_ERR_SPUX_RSFEC_ALGN_FAIL, /* = 22 */
+ CGX_ERR_SPUX_RSFEC_ALGN_FAIL,
+ CGX_ERR_SPUX_MARKER_LOCK_FAIL,
+ CGX_ERR_SET_FEC_INVALID,
+ CGX_ERR_SET_FEC_FAIL,
+ CGX_ERR_MODULE_INVALID,
+ CGX_ERR_MODULE_NOT_PRESENT,
+ CGX_ERR_SPEED_CHANGE_INVALID,
};
/* LINK speed types */
@@ -59,10 +65,41 @@ enum cgx_link_speed {
CGX_LINK_25G,
CGX_LINK_40G,
CGX_LINK_50G,
+ CGX_LINK_80G,
CGX_LINK_100G,
CGX_LINK_SPEED_MAX,
};
+enum CGX_MODE_ {
+ CGX_MODE_SGMII,
+ CGX_MODE_1000_BASEX,
+ CGX_MODE_QSGMII,
+ CGX_MODE_10G_C2C,
+ CGX_MODE_10G_C2M,
+ CGX_MODE_10G_KR,
+ CGX_MODE_20G_C2C,
+ CGX_MODE_25G_C2C,
+ CGX_MODE_25G_C2M,
+ CGX_MODE_25G_2_C2C,
+ CGX_MODE_25G_CR,
+ CGX_MODE_25G_KR,
+ CGX_MODE_40G_C2C,
+ CGX_MODE_40G_C2M,
+ CGX_MODE_40G_CR4,
+ CGX_MODE_40G_KR4,
+ CGX_MODE_40GAUI_C2C,
+ CGX_MODE_50G_C2C,
+ CGX_MODE_50G_C2M,
+ CGX_MODE_50G_4_C2C,
+ CGX_MODE_50G_CR,
+ CGX_MODE_50G_KR,
+ CGX_MODE_80GAUI_C2C,
+ CGX_MODE_100G_C2C,
+ CGX_MODE_100G_C2M,
+ CGX_MODE_100G_CR4,
+ CGX_MODE_100G_KR4,
+ CGX_MODE_MAX /* = 29 */
+};
/* REQUEST ID types. Input to firmware */
enum cgx_cmd_id {
CGX_CMD_NONE,
@@ -75,12 +112,25 @@ enum cgx_cmd_id {
CGX_CMD_INTERNAL_LBK,
CGX_CMD_EXTERNAL_LBK,
CGX_CMD_HIGIG,
- CGX_CMD_LINK_STATE_CHANGE,
+ CGX_CMD_LINK_STAT_CHANGE,
CGX_CMD_MODE_CHANGE, /* hot plug support */
CGX_CMD_INTF_SHUTDOWN,
CGX_CMD_GET_MKEX_PRFL_SIZE,
CGX_CMD_GET_MKEX_PRFL_ADDR,
CGX_CMD_GET_FWD_BASE, /* get base address of shared FW data */
+ CGX_CMD_GET_LINK_MODES, /* Supported Link Modes */
+ CGX_CMD_SET_LINK_MODE,
+ CGX_CMD_GET_SUPPORTED_FEC,
+ CGX_CMD_SET_FEC,
+ CGX_CMD_GET_AN,
+ CGX_CMD_SET_AN,
+ CGX_CMD_GET_ADV_LINK_MODES,
+ CGX_CMD_GET_ADV_FEC,
+ CGX_CMD_GET_PHY_MOD_TYPE, /* line-side modulation type: NRZ or PAM4 */
+ CGX_CMD_SET_PHY_MOD_TYPE,
+ CGX_CMD_PRBS,
+ CGX_CMD_DISPLAY_EYE,
+ CGX_CMD_GET_PHY_FEC_STATS,
};
/* async event ids */
@@ -154,6 +204,7 @@ enum cgx_cmd_own {
* CGX_STAT_SUCCESS
*/
#define RESP_FWD_BASE GENMASK_ULL(56, 9)
+#define RESP_LINKSTAT_LMAC_TYPE GENMASK_ULL(35, 28)
/* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE
* status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS
@@ -171,13 +222,19 @@ struct cgx_lnk_sts {
uint64_t full_duplex:1;
uint64_t speed:4; /* cgx_link_speed */
uint64_t err_type:10;
- uint64_t reserved2:39;
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled, if not 0 */
+ uint64_t port:8;
+ uint64_t reserved2:28;
};
#define RESP_LINKSTAT_UP GENMASK_ULL(9, 9)
#define RESP_LINKSTAT_FDUPLEX GENMASK_ULL(10, 10)
#define RESP_LINKSTAT_SPEED GENMASK_ULL(14, 11)
#define RESP_LINKSTAT_ERRTYPE GENMASK_ULL(24, 15)
+#define RESP_LINKSTAT_AN GENMASK_ULL(25, 25)
+#define RESP_LINKSTAT_FEC GENMASK_ULL(27, 26)
+#define RESP_LINKSTAT_PORT GENMASK_ULL(35, 28)
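
For illustration, a sketch of decoding the new fields from the firmware link-status word with FIELD_GET() from <linux/bitfield.h>, mirroring what link_status_user_format() does in cgx.c; the helper name is made up:

/* Illustrative decode of the firmware link-status word (see
 * link_status_user_format() in cgx.c for the in-tree version).
 */
static void example_decode_lstat(u64 lstat, struct cgx_link_user_info *linfo)
{
	linfo->link_up     = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->an          = FIELD_GET(RESP_LINKSTAT_AN, lstat);
	linfo->fec         = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
}
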
/* scratchx(1) CSR used for non-secure SW->ATF communication
* This CSR acts as a command register
@@ -199,4 +256,12 @@ struct cgx_lnk_sts {
#define CMDLINKCHANGE_FULLDPLX BIT_ULL(9)
#define CMDLINKCHANGE_SPEED GENMASK_ULL(13, 10)
+#define CMDSETFEC GENMASK_ULL(9, 8)
+/* command argument to be passed for cmd ID - CGX_CMD_MODE_CHANGE */
+#define CMDMODECHANGE_SPEED GENMASK_ULL(11, 8)
+#define CMDMODECHANGE_DUPLEX GENMASK_ULL(12, 12)
+#define CMDMODECHANGE_AN GENMASK_ULL(13, 13)
+#define CMDMODECHANGE_PORT GENMASK_ULL(21, 14)
+#define CMDMODECHANGE_FLAGS GENMASK_ULL(63, 22)
+
#endif /* __CGX_FW_INTF_H__ */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index 17f6f42f4453..e66109367487 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -155,6 +155,8 @@ enum nix_scheduler {
#define NIC_HW_MIN_FRS 40
#define NIC_HW_MAX_FRS 9212
#define SDP_HW_MAX_FRS 65535
+#define CN10K_LMAC_LINK_MAX_FRS 16380 /* 16k - FCS */
+#define CN10K_LBK_LINK_MAX_FRS 65535 /* 64k */
/* NIX RX action operation*/
#define NIX_RX_ACTIONOP_DROP (0x0ull)
@@ -191,6 +193,9 @@ enum nix_scheduler {
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
+#define NIX_CHAN_SDP_CH_START (0x700ull)
+
+#define SDP_CHANNELS 256
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
new file mode 100644
index 000000000000..45706fd87120
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef LMAC_COMMON_H
+#define LMAC_COMMON_H
+
+#include "rvu.h"
+#include "cgx.h"
+/**
+ * struct lmac
+ * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
+ * @cmd_lock: Lock to serialize the command interface
+ * @resp: command response
+ * @link_info: link related information
+ * @event_cb: callback for linkchange events
+ * @event_cb_lock: lock for serializing callback with unregister
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
+ * @cgx: parent cgx port
+ * @lmac_id: lmac port id
+ * @name: lmac port name
+ */
+struct lmac {
+ wait_queue_head_t wq_cmd_cmplt;
+ /* Lock to serialize the command interface */
+ struct mutex cmd_lock;
+ u64 resp;
+ struct cgx_link_user_info link_info;
+ struct cgx_event_cb event_cb;
+ /* lock for serializing callback with unregister */
+ spinlock_t event_cb_lock;
+ bool cmd_pend;
+ struct cgx *cgx;
+ u8 lmac_id;
+ char *name;
+};
+
+/* CGX & RPM has different feature set
+ * update the structure fields with different one
+ */
+struct mac_ops {
+ char *name;
+ /* Features like RXSTAT, TXSTAT, DMAC FILTER CSRs differ by a fixed
+ * BAR offset, for example:
+ * CGX DMAC_CTL0 0x1f8
+ * RPM DMAC_CTL0 0x4ff8
+ */
+ u64 csr_offset;
+ /* There is no dedicated interrupt defined for ATF to send events to
+ * the kernel, hence CGX uses the OVERFLOW bit in CMR_INT. The RPM block
+ * supports SW_INT, so ATF triggers that interrupt after processing the
+ * requested command.
+ */
+ u64 int_register;
+ u64 int_set_reg;
+ /* lmac offset is different in RPM */
+ u8 lmac_offset;
+ u8 irq_offset;
+ u8 int_ena_bit;
+ u8 lmac_fwi;
+ u32 fifo_len;
+ bool non_contiguous_serdes_lane;
+ /* RPM & CGX differ in the number of receive/transmit stats */
+ u8 rx_stats_cnt;
+ u8 tx_stats_cnt;
+ /* In case of RPM, get the number of lmacs from RPMX_CMR_RX_LMACS[LMAC_EXIST];
+ * the number of set bits in lmac_exist gives the number of lmacs
+ */
+ int (*get_nr_lmacs)(void *cgx);
+ u8 (*get_lmac_type)(void *cgx, int lmac_id);
+ int (*mac_lmac_intl_lbk)(void *cgx, int lmac_id,
+ bool enable);
+ /* Register Stats related functions */
+ int (*mac_get_rx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *rx_stat);
+ int (*mac_get_tx_stats)(void *cgx, int lmac_id,
+ int idx, u64 *tx_stat);
+
+ /* Enable LMAC Pause Frame Configuration */
+ void (*mac_enadis_rx_pause_fwding)(void *cgxd,
+ int lmac_id,
+ bool enable);
+
+ int (*mac_get_pause_frm_status)(void *cgxd,
+ int lmac_id,
+ u8 *tx_pause,
+ u8 *rx_pause);
+
+ int (*mac_enadis_pause_frm)(void *cgxd,
+ int lmac_id,
+ u8 tx_pause,
+ u8 rx_pause);
+
+ void (*mac_pause_frm_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
+};
+
+struct cgx {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ u8 cgx_id;
+ u8 lmac_count;
+ struct lmac *lmac_idmap[MAX_LMAC_PER_CGX];
+ struct work_struct cgx_cmd_work;
+ struct workqueue_struct *cgx_cmd_workq;
+ struct list_head cgx_list;
+ u64 hw_features;
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap; /* bitmap of enabled lmacs */
+ /* Lock to serialize read/write of global csrs like
+ * RPMX_MTI_STAT_DATA_HI_CDC etc
+ */
+ struct mutex lock;
+};
+
+typedef struct cgx rpm_t;
+
+/* Function Declarations */
+void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val);
+u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset);
+struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx);
+int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac);
+int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id);
+bool is_lmac_valid(struct cgx *cgx, int lmac_id);
+struct mac_ops *rpm_get_mac_ops(void);
+
+#endif /* LMAC_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index bbabb8e64201..0a37ca96aab8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -20,9 +20,9 @@ static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
void __otx2_mbox_reset(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -56,12 +56,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox)
}
EXPORT_SYMBOL(otx2_mbox_destroy);
-int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
- void *reg_base, int direction, int ndevs)
+static int otx2_mbox_setup(struct otx2_mbox *mbox, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
{
- struct otx2_mbox_dev *mdev;
- int devid;
-
switch (direction) {
case MBOX_DIR_AFPF:
case MBOX_DIR_PFVF:
@@ -121,7 +118,6 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
mbox->reg_base = reg_base;
- mbox->hwbase = hwbase;
mbox->pdev = pdev;
mbox->dev = kcalloc(ndevs, sizeof(struct otx2_mbox_dev), GFP_KERNEL);
@@ -129,11 +125,27 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
otx2_mbox_destroy(mbox);
return -ENOMEM;
}
-
mbox->ndevs = ndevs;
+
+ return 0;
+}
+
+int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
+ void *reg_base, int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase;
+
for (devid = 0; devid < ndevs; devid++) {
mdev = &mbox->dev[devid];
mdev->mbase = mbox->hwbase + (devid * MBOX_SIZE);
+ mdev->hwbase = mdev->mbase;
spin_lock_init(&mdev->mbox_lock);
/* Init header to reset value */
otx2_mbox_reset(mbox, devid);
@@ -143,6 +155,35 @@ int otx2_mbox_init(struct otx2_mbox *mbox, void *hwbase, struct pci_dev *pdev,
}
EXPORT_SYMBOL(otx2_mbox_init);
+/* Initialize mailbox with the set of mailbox region addresses
+ * in the array hwbase.
+ */
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void **hwbase,
+ struct pci_dev *pdev, void *reg_base,
+ int direction, int ndevs)
+{
+ struct otx2_mbox_dev *mdev;
+ int devid, err;
+
+ err = otx2_mbox_setup(mbox, pdev, reg_base, direction, ndevs);
+ if (err)
+ return err;
+
+ mbox->hwbase = hwbase[0];
+
+ for (devid = 0; devid < ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ mdev->mbase = hwbase[devid];
+ mdev->hwbase = hwbase[devid];
+ spin_lock_init(&mdev->mbox_lock);
+ /* Init header to reset value */
+ otx2_mbox_reset(mbox, devid);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(otx2_mbox_regions_init);
+
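
A hedged usage sketch for the new initializer: a hypothetical caller that has already collected one mailbox base address per PF. Only otx2_mbox_regions_init() and MBOX_DIR_AFPF are taken from the tree; the surrounding names are assumed:

/* Hypothetical: set up an AF->PF mailbox where each PF owns a separate,
 * possibly non-contiguous region whose addresses were collected in pf_bases[].
 */
static int example_afpf_mbox_init(struct otx2_mbox *mbox, struct pci_dev *pdev,
				  void *reg_base, void **pf_bases, int num_pfs)
{
	return otx2_mbox_regions_init(mbox, pf_bases, pdev, reg_base,
				      MBOX_DIR_AFPF, num_pfs);
}
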
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid)
{
unsigned long timeout = jiffies + msecs_to_jiffies(MBOX_RSP_TIMEOUT);
@@ -175,9 +216,9 @@ EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
- void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
+ void *hw_mbase = mdev->hwbase;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index f919283ddc34..ea456099b33c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -36,7 +36,7 @@
#define INTR_MASK(pfvfs) ((pfvfs < 64) ? (BIT_ULL(pfvfs) - 1) : (~0ull))
-#define MBOX_RSP_TIMEOUT 2000 /* Time(ms) to wait for mbox response */
+#define MBOX_RSP_TIMEOUT 3000 /* Time(ms) to wait for mbox response */
#define MBOX_MSG_ALIGN 16 /* Align mbox msg start to 16bytes */
@@ -52,6 +52,7 @@
struct otx2_mbox_dev {
void *mbase; /* This dev's mbox region */
+ void *hwbase;
spinlock_t mbox_lock;
u16 msg_size; /* Total msg size to be sent */
u16 rsp_size; /* Total rsp size to be sure the reply is ok */
@@ -98,6 +99,9 @@ void otx2_mbox_destroy(struct otx2_mbox *mbox);
int otx2_mbox_init(struct otx2_mbox *mbox, void __force *hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs);
+int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
+ struct pci_dev *pdev, void __force *reg_base,
+ int direction, int ndevs);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
@@ -149,6 +153,16 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
+ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -237,6 +251,9 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
+ nix_cn10k_aq_enq_rsp) \
+M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -354,12 +371,17 @@ struct get_hw_cap_rsp {
struct cgx_stats_rsp {
struct mbox_msghdr hdr;
-#define CGX_RX_STATS_COUNT 13
+#define CGX_RX_STATS_COUNT 9
#define CGX_TX_STATS_COUNT 18
u64 rx_stats[CGX_RX_STATS_COUNT];
u64 tx_stats[CGX_TX_STATS_COUNT];
};
+struct cgx_fec_stats_rsp {
+ struct mbox_msghdr hdr;
+ u64 fec_corr_blks;
+ u64 fec_uncorr_blks;
+};
/* Structure for requesting the operation for
* setting/getting mac address in the CGX interface
*/
@@ -373,6 +395,8 @@ struct cgx_link_user_info {
uint64_t full_duplex:1;
uint64_t lmac_type_id:4;
uint64_t speed:20; /* speed in Mbps */
+ uint64_t an:1; /* AN supported or not */
+ uint64_t fec:2; /* FEC type if enabled else 0 */
#define LMACTYPE_STR_LEN 16
char lmac_type[LMACTYPE_STR_LEN];
};
@@ -391,6 +415,98 @@ struct cgx_pause_frm_cfg {
u8 tx_pause;
};
+enum fec_type {
+ OTX2_FEC_NONE,
+ OTX2_FEC_BASER,
+ OTX2_FEC_RS,
+ OTX2_FEC_STATS_CNT = 2,
+ OTX2_FEC_OFF,
+};
+
+struct fec_mode {
+ struct mbox_msghdr hdr;
+ int fec;
+};
+
+struct sfp_eeprom_s {
+#define SFP_EEPROM_SIZE 256
+ u16 sff_id;
+ u8 buf[SFP_EEPROM_SIZE];
+ u64 reserved;
+};
+
+struct phy_s {
+ struct {
+ u64 can_change_mod_type:1;
+ u64 mod_type:1;
+ u64 has_fec_stats:1;
+ } misc;
+ struct fec_stats_s {
+ u32 rsfec_corr_cws;
+ u32 rsfec_uncorr_cws;
+ u32 brfec_corr_blks;
+ u32 brfec_uncorr_blks;
+ } fec_stats;
+};
+
+struct cgx_lmac_fwdata_s {
+ u16 rw_valid;
+ u64 supported_fec;
+ u64 supported_an;
+ u64 supported_link_modes;
+ /* only applicable if AN is supported */
+ u64 advertised_fec;
+ u64 advertised_link_modes;
+ /* Only applicable if SFP/QSFP slot is present */
+ struct sfp_eeprom_s sfp_eeprom;
+ struct phy_s phy;
+#define LMAC_FWDATA_RESERVED_MEM 1021
+ u64 reserved[LMAC_FWDATA_RESERVED_MEM];
+};
+
+struct cgx_fw_data {
+ struct mbox_msghdr hdr;
+ struct cgx_lmac_fwdata_s fwdata;
+};
+
+struct cgx_set_link_mode_args {
+ u32 speed;
+ u8 duplex;
+ u8 an;
+ u8 ports;
+ u64 mode;
+};
+
+struct cgx_set_link_mode_req {
+#define AUTONEG_UNKNOWN 0xff
+ struct mbox_msghdr hdr;
+ struct cgx_set_link_mode_args args;
+};
+
+struct cgx_set_link_mode_rsp {
+ struct mbox_msghdr hdr;
+ int status;
+};
+
+#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
+#define RVU_MAC_VERSION BIT_ULL(2)
+#define RVU_MAC_CGX BIT_ULL(3)
+#define RVU_MAC_RPM BIT_ULL(4)
+
+struct cgx_features_info_msg {
+ struct mbox_msghdr hdr;
+ u64 lmac_features;
+};
+
+struct rpm_stats_rsp {
+ struct mbox_msghdr hdr;
+#define RPM_RX_STATS_COUNT 43
+#define RPM_TX_STATS_COUNT 34
+ u64 rx_stats[RPM_RX_STATS_COUNT];
+ u64 tx_stats[RPM_TX_STATS_COUNT];
+};
+
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -545,6 +661,39 @@ struct nix_lf_free_req {
u64 flags;
};
+/* CN10K NIX AQ enqueue msg */
+struct nix_cn10k_aq_enq_req {
+ struct mbox_msghdr hdr;
+ u32 qidx;
+ u8 ctype;
+ u8 op;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+ union {
+ struct nix_cn10k_rq_ctx_s rq_mask;
+ struct nix_cn10k_sq_ctx_s sq_mask;
+ struct nix_cq_ctx_s cq_mask;
+ struct nix_rsse_s rss_mask;
+ struct nix_rx_mce_s mce_mask;
+ };
+};
+
+struct nix_cn10k_aq_enq_rsp {
+ struct mbox_msghdr hdr;
+ union {
+ struct nix_cn10k_rq_ctx_s rq;
+ struct nix_cn10k_sq_ctx_s sq;
+ struct nix_cq_ctx_s cq;
+ struct nix_rsse_s rss;
+ struct nix_rx_mce_s mce;
+ };
+};
+
/* NIX AQ enqueue msg */
struct nix_aq_enq_req {
struct mbox_msghdr hdr;
@@ -717,6 +866,8 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
#define NIX_FLOW_KEY_TYPE_IPV4_PROTO BIT(21)
+#define NIX_FLOW_KEY_TYPE_AH BIT(22)
+#define NIX_FLOW_KEY_TYPE_ESP BIT(23)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -807,6 +958,12 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */
};
+struct nix_hw_info {
+ struct mbox_msghdr hdr;
+ u16 max_mtu;
+ u16 min_mtu;
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -1071,6 +1228,7 @@ struct cpt_rd_wr_reg_msg {
u64 *ret_val;
u64 val;
u8 is_write;
+ int blkaddr;
};
struct cpt_lf_alloc_req_msg {
@@ -1078,6 +1236,7 @@ struct cpt_lf_alloc_req_msg {
u16 nix_pf_func;
u16 sso_pf_func;
u16 eng_grpmsk;
+ int blkaddr;
};
#endif /* MBOX_H */
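
As a reading aid for the table additions above: each M(<NAME>, <id>, <fn>, <req>, <rsp>) entry is expected to pair with an AF-side handler named rvu_mbox_handler_<fn>() taking the listed request and response types (the generation plumbing lives outside this hunk), which is how the handlers added later in this patch line up with the new IDs. For example:

/* Illustration only: the table entry
 *   M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp)
 * corresponds to a handler of this shape, implemented in rvu_cgx.c
 * later in this patch.
 */
int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp);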
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index a1f79445db71..3c640f6aba92 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -162,6 +162,11 @@ enum key_fields {
NPC_DIP_IPV4,
NPC_SIP_IPV6,
NPC_DIP_IPV6,
+ NPC_IPPROTO_TCP,
+ NPC_IPPROTO_UDP,
+ NPC_IPPROTO_SCTP,
+ NPC_IPPROTO_AH,
+ NPC_IPPROTO_ESP,
NPC_SPORT_TCP,
NPC_DPORT_TCP,
NPC_SPORT_UDP,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index f69f4f35ae48..1ee37853f338 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -21,6 +21,9 @@
#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00
+#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00
#define PCI_DEVID_OCTEONTX2_RST 0xA085
#define PCI_PTP_BAR_NO 0
@@ -234,6 +237,15 @@ static const struct pci_device_id ptp_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
PCI_VENDOR_ID_CAVIUM,
PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CN10K_A_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CNF10K_A_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_CNF10K_B_PTP) },
{ 0, }
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
new file mode 100644
index 000000000000..a91ccdc59403
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include "cgx.h"
+#include "lmac_common.h"
+
+static struct mac_ops rpm_mac_ops = {
+ .name = "rpm",
+ .csr_offset = 0x4e00,
+ .lmac_offset = 20,
+ .int_register = RPMX_CMRX_SW_INT,
+ .int_set_reg = RPMX_CMRX_SW_INT_ENA_W1S,
+ .irq_offset = 1,
+ .int_ena_bit = BIT_ULL(0),
+ .lmac_fwi = RPM_LMAC_FWI,
+ .non_contiguous_serdes_lane = true,
+ .rx_stats_cnt = 43,
+ .tx_stats_cnt = 34,
+ .get_nr_lmacs = rpm_get_nr_lmacs,
+ .get_lmac_type = rpm_get_lmac_type,
+ .mac_lmac_intl_lbk = rpm_lmac_internal_loopback,
+ .mac_get_rx_stats = rpm_get_rx_stats,
+ .mac_get_tx_stats = rpm_get_tx_stats,
+ .mac_enadis_rx_pause_fwding = rpm_lmac_enadis_rx_pause_fwding,
+ .mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
+ .mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
+ .mac_pause_frm_config = rpm_lmac_pause_frm_config,
+};
+
+struct mac_ops *rpm_get_mac_ops(void)
+{
+ return &rpm_mac_ops;
+}
+
+static void rpm_write(rpm_t *rpm, u64 lmac, u64 offset, u64 val)
+{
+ cgx_write(rpm, lmac, offset, val);
+}
+
+static u64 rpm_read(rpm_t *rpm, u64 lmac, u64 offset)
+{
+ return cgx_read(rpm, lmac, offset);
+}
+
+int rpm_get_nr_lmacs(void *rpmd)
+{
+ rpm_t *rpm = rpmd;
+
+ return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
+}
+
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!rpm)
+ return;
+
+ if (enable) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_lmac_get_pause_frm_status(void *rpmd, int lmac_id,
+ u8 *tx_pause, u8 *rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ *rx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ *tx_pause = !(cfg & RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE);
+ return 0;
+}
+
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ cfg |= rx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ cfg |= tx_pause ? 0x0 : RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ cfg = rpm_read(rpm, 0, RPMX_CMR_RX_OVR_BP);
+ if (tx_pause) {
+ cfg &= ~RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ } else {
+ cfg |= RPMX_CMR_RX_OVR_BP_EN(lmac_id);
+ cfg &= ~RPMX_CMR_RX_OVR_BP_BP(lmac_id);
+ }
+ rpm_write(rpm, 0, RPMX_CMR_RX_OVR_BP, cfg);
+ return 0;
+}
+
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (enable) {
+ /* Enable 802.3 pause frame mode */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable receive pause frames */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Enable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg &= ~RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Set pause time and interval */
+ cfg = rpm_read(rpm, lmac_id,
+ RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA);
+ cfg &= ~0xFFFFULL;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA,
+ cfg | RPM_DEFAULT_PAUSE_TIME);
+ /* Set pause interval as the hardware default is too short */
+ cfg = rpm_read(rpm, lmac_id,
+ RPMX_MTI_MAC100X_CL01_QUANTA_THRESH);
+ cfg &= ~0xFFFFULL;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_CL01_QUANTA_THRESH,
+ cfg | (RPM_DEFAULT_PAUSE_TIME / 2));
+
+ } else {
+ /* ALL pause frames received are completely ignored */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable forward pause to TX block */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+
+ /* Disable pause frames transmission */
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ cfg |= RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE;
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ }
+}
+
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to the per-LMAC Rx statistics page */
+ idx += lmac_id * rpm->mac_ops->rx_stats_cnt;
+
+ /* Read lower 32 bits of counter */
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+
+ /* upon read of lower 32 bits, higher 32 bits are written
+ * to RPMX_MTI_STAT_DATA_HI_CDC
+ */
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *rx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
+{
+ rpm_t *rpm = rpmd;
+ u64 val_lo, val_hi;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+
+ mutex_lock(&rpm->lock);
+
+ /* Update idx to point to the per-LMAC Tx statistics page */
+ idx += lmac_id * rpm->mac_ops->tx_stats_cnt;
+
+ val_lo = rpm_read(rpm, 0, RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX +
+ (idx * 8));
+ val_hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);
+
+ *tx_stat = (val_hi << 32 | val_lo);
+
+ mutex_unlock(&rpm->lock);
+ return 0;
+}
+
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 req = 0, resp;
+ int err;
+
+ req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_LINK_STS, req);
+ err = cgx_fwi_cmd_generic(req, &resp, rpm, 0);
+ if (!err)
+ return FIELD_GET(RESP_LINKSTAT_LMAC_TYPE, resp);
+ return err;
+}
+
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u8 lmac_type;
+ u64 cfg;
+
+ if (!rpm || lmac_id >= rpm->lmac_count)
+ return -ENODEV;
+ lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
+ if (lmac_type == LMAC_MODE_100G_R) {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+ } else {
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+ }
+
+ return 0;
+}
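
For clarity on the statistics addressing used in rpm_get_rx_stats()/rpm_get_tx_stats(): each LMAC owns a page of rx_stats_cnt (43) Rx and tx_stats_cnt (34) Tx counters, 8 bytes each, and reading the low word of a counter captures its upper 32 bits in RPMX_MTI_STAT_DATA_HI_CDC. A worked sketch with assumed values (counter index 5 of LMAC 2; the helper name is hypothetical):

/* Worked example (assumed values): Rx counter index 5 of LMAC 2.
 *
 *   idx  = 5 + 2 * 43       = 91
 *   addr = 0x12000 + 91 * 8 = 0x122D8
 */
static u64 rpm_example_rx_stat(rpm_t *rpm)
{
	u64 lo, hi;

	lo = rpm_read(rpm, 0, RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX + 91 * 8);
	/* Reading the low word captures the high word in the CDC register */
	hi = rpm_read(rpm, 0, RPMX_MTI_STAT_DATA_HI_CDC);

	return (hi << 32) | lo;
}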
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
new file mode 100644
index 000000000000..d32e74bd5964
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RPM driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef RPM_H
+#define RPM_H
+
+#include <linux/bits.h>
+
+/* PCI device IDs */
+#define PCI_DEVID_CN10K_RPM 0xA060
+
+/* Registers */
+#define RPMX_CMRX_SW_INT 0x180
+#define RPMX_CMRX_SW_INT_W1S 0x188
+#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
+#define RPMX_CMRX_LINK_CFG 0x1070
+#define RPMX_MTI_PCS100X_CONTROL1 0x20000
+#define RPMX_MTI_LPCSX_CONTROL1 0x30000
+#define RPMX_MTI_PCS_LBK BIT_ULL(14)
+#define RPMX_MTI_LPCSX_CONTROL(id) (0x30000 | ((id) * 0x100))
+
+#define RPMX_CMRX_LINK_RANGE_MASK GENMASK_ULL(19, 16)
+#define RPMX_CMRX_LINK_BASE_MASK GENMASK_ULL(11, 0)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG 0x8010
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_RX_P_DISABLE BIT_ULL(29)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_TX_P_DISABLE BIT_ULL(28)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PAUSE_IGNORE BIT_ULL(8)
+#define RPMX_MTI_MAC100X_COMMAND_CONFIG_PFC_MODE BIT_ULL(19)
+#define RPMX_MTI_MAC100X_CL01_PAUSE_QUANTA 0x80A8
+#define RPMX_MTI_MAC100X_CL01_QUANTA_THRESH 0x80C8
+#define RPM_DEFAULT_PAUSE_TIME 0xFFFF
+#define RPMX_CMR_RX_OVR_BP 0x4120
+#define RPMX_CMR_RX_OVR_BP_EN(x) BIT_ULL((x) + 8)
+#define RPMX_CMR_RX_OVR_BP_BP(x) BIT_ULL((x) + 4)
+#define RPMX_MTI_STAT_RX_STAT_PAGES_COUNTERX 0x12000
+#define RPMX_MTI_STAT_TX_STAT_PAGES_COUNTERX 0x13000
+#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
+
+#define RPM_LMAC_FWI 0xa
+
+/* Function Declarations */
+int rpm_get_nr_lmacs(void *rpmd);
+u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
+int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable);
+void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_get_pause_frm_status(void *cgxd, int lmac_id, u8 *tx_pause,
+ u8 *rx_pause);
+void rpm_lmac_pause_frm_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
+ u8 rx_pause);
+int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
+int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index e8fd712860a1..d9a1a71c7ccc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -22,7 +22,7 @@
#include "rvu_trace.h"
-#define DRV_NAME "octeontx2-af"
+#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
@@ -78,6 +78,9 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu)
if (is_rvu_96xx_A0(rvu))
hw->cap.nix_rx_multicast = false;
}
+
+ if (!is_rvu_otx2(rvu))
+ hw->cap.per_pf_mbox_regs = true;
}
/* Poll a RVU block's register 'offset', for a 'zero'
@@ -646,7 +649,7 @@ setup_vfmsix:
}
/* HW interprets RVU_AF_MSIXTR_BASE address as an IOVA, hence
- * create a IOMMU mapping for the physcial address configured by
+ * create an IOMMU mapping for the physical address configured by
* firmware and reconfig RVU_AF_MSIXTR_BASE with IOVA.
*/
cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_CONST);
@@ -852,6 +855,31 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
return rvu_alloc_bitmap(&block->lf);
}
+static void rvu_get_lbk_bufsize(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+
+ /* cache fifo size */
+ rvu->hw->lbk_bufsize = FIELD_GET(LBK_CONST_BUF_SIZE, lbk_const);
+
+ iounmap(base);
+err_put:
+ pci_dev_put(pdev);
+}
+
static int rvu_setup_hw_resources(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1003,6 +1031,10 @@ cpt:
rvu_scan_block(rvu, block);
}
+ err = rvu_set_channels_base(rvu);
+ if (err)
+ goto msix_err;
+
err = rvu_npc_init(rvu);
if (err)
goto npc_err;
@@ -1018,10 +1050,14 @@ cpt:
if (err)
goto npa_err;
+ rvu_get_lbk_bufsize(rvu);
+
err = rvu_nix_init(rvu);
if (err)
goto nix_err;
+ rvu_program_channels(rvu);
+
return 0;
nix_err:
@@ -1323,7 +1359,7 @@ static int rvu_get_attach_blkaddr(struct rvu *rvu, int blktype,
break;
default:
return rvu_get_blkaddr(rvu, blktype, 0);
- };
+ }
if (is_block_implemented(rvu->hw, blkaddr))
return blkaddr;
@@ -1936,41 +1972,105 @@ static inline void rvu_afvf_mbox_up_handler(struct work_struct *work)
__rvu_mbox_up_handler(mwork, TYPE_AFVF);
}
+static int rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
+ int num, int type)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int region;
+ u64 bar4;
+
+ /* For the CN10K platform the VF mailbox regions of a PF follow the
+ * PF <-> AF mailbox region, whereas for OcteonTx2 the address is read
+ * from the RVU_PF_VF_BAR4_ADDR register.
+ */
+ if (type == TYPE_AFVF) {
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(0)) +
+ MBOX_SIZE;
+ bar4 += region * MBOX_SIZE;
+ } else {
+ bar4 = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+ }
+
+ /* For the CN10K platform the AF <-> PF mailbox region of a PF is read
+ * from per-PF registers, whereas for OcteonTx2 it is derived from the
+ * RVU_AF_PF_BAR4_ADDR register.
+ */
+ for (region = 0; region < num; region++) {
+ if (hw->cap.per_pf_mbox_regs) {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PFX_BAR4_ADDR(region));
+ } else {
+ bar4 = rvu_read64(rvu, BLKADDR_RVUM,
+ RVU_AF_PF_BAR4_ADDR);
+ bar4 += region * MBOX_SIZE;
+ }
+ mbox_addr[region] = (void *)ioremap_wc(bar4, MBOX_SIZE);
+ if (!mbox_addr[region])
+ goto error;
+ }
+ return 0;
+
+error:
+ while (region--)
+ iounmap((void __iomem *)mbox_addr[region]);
+ return -ENOMEM;
+}
+
static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
int type, int num,
void (mbox_handler)(struct work_struct *),
void (mbox_up_handler)(struct work_struct *))
{
- void __iomem *hwbase = NULL, *reg_base;
- int err, i, dir, dir_up;
+ int err = -EINVAL, i, dir, dir_up;
+ void __iomem *reg_base;
struct rvu_work *mwork;
+ void **mbox_regions;
const char *name;
- u64 bar4_addr;
+
+ mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
+ if (!mbox_regions)
+ return -ENOMEM;
switch (type) {
case TYPE_AFPF:
name = "rvu_afpf_mailbox";
- bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR);
dir = MBOX_DIR_AFPF;
dir_up = MBOX_DIR_AFPF_UP;
reg_base = rvu->afreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFPF);
+ if (err)
+ goto free_regions;
break;
case TYPE_AFVF:
name = "rvu_afvf_mailbox";
- bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR);
dir = MBOX_DIR_PFVF;
dir_up = MBOX_DIR_PFVF_UP;
reg_base = rvu->pfreg_base;
+ err = rvu_get_mbox_regions(rvu, mbox_regions, num, TYPE_AFVF);
+ if (err)
+ goto free_regions;
break;
default:
- return -EINVAL;
+ goto free_regions;
}
mw->mbox_wq = alloc_workqueue(name,
WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
num);
- if (!mw->mbox_wq)
- return -ENOMEM;
+ if (!mw->mbox_wq) {
+ err = -ENOMEM;
+ goto unmap_regions;
+ }
mw->mbox_wrk = devm_kcalloc(rvu->dev, num,
sizeof(struct rvu_work), GFP_KERNEL);
@@ -1986,23 +2086,13 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
goto exit;
}
- /* Mailbox is a reserved memory (in RAM) region shared between
- * RVU devices, shouldn't be mapped as device memory to allow
- * unaligned accesses.
- */
- hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num);
- if (!hwbase) {
- dev_err(rvu->dev, "Unable to map mailbox region\n");
- err = -ENOMEM;
- goto exit;
- }
-
- err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num);
+ err = otx2_mbox_regions_init(&mw->mbox, mbox_regions, rvu->pdev,
+ reg_base, dir, num);
if (err)
goto exit;
- err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev,
- reg_base, dir_up, num);
+ err = otx2_mbox_regions_init(&mw->mbox_up, mbox_regions, rvu->pdev,
+ reg_base, dir_up, num);
if (err)
goto exit;
@@ -2015,25 +2105,36 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
mwork->rvu = rvu;
INIT_WORK(&mwork->work, mbox_up_handler);
}
-
+ kfree(mbox_regions);
return 0;
+
exit:
- if (hwbase)
- iounmap((void __iomem *)hwbase);
destroy_workqueue(mw->mbox_wq);
+unmap_regions:
+ while (num--)
+ iounmap((void __iomem *)mbox_regions[num]);
+free_regions:
+ kfree(mbox_regions);
return err;
}
static void rvu_mbox_destroy(struct mbox_wq_info *mw)
{
+ struct otx2_mbox *mbox = &mw->mbox;
+ struct otx2_mbox_dev *mdev;
+ int devid;
+
if (mw->mbox_wq) {
flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
- if (mw->mbox.hwbase)
- iounmap((void __iomem *)mw->mbox.hwbase);
+ for (devid = 0; devid < mbox->ndevs; devid++) {
+ mdev = &mbox->dev[devid];
+ if (mdev->hwbase)
+ iounmap((void __iomem *)mdev->hwbase);
+ }
otx2_mbox_destroy(&mw->mbox);
otx2_mbox_destroy(&mw->mbox_up);
@@ -2150,6 +2251,9 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf);
else if (block->addr == BLKADDR_NPA)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
+ else if ((block->addr == BLKADDR_CPT0) ||
+ (block->addr == BLKADDR_CPT1))
+ rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
@@ -2650,8 +2754,6 @@ static void rvu_enable_afvf_intr(struct rvu *rvu)
rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}
-#define PCI_DEVID_OCTEONTX2_LBK 0xA061
-
int rvu_get_num_lbk_chans(void)
{
struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b1a6ecfd563e..fa6e46e36ae4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -19,12 +19,15 @@
#include "common.h"
#include "mbox.h"
#include "npc.h"
+#include "rvu_reg.h"
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065
+#define PCI_DEVID_OCTEONTX2_LBK 0xA061
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
+#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -33,6 +36,7 @@
#define NAME_SIZE 32
#define MAX_NIX_BLKS 2
+#define MAX_CPT_BLKS 2
/* PF_FUNC */
#define RVU_PFVF_PF_SHIFT 10
@@ -47,6 +51,11 @@ struct dump_ctx {
bool all;
};
+struct cpt_ctx {
+ int blkaddr;
+ struct rvu *rvu;
+};
+
struct rvu_debugfs {
struct dentry *root;
struct dentry *cgx_root;
@@ -61,6 +70,7 @@ struct rvu_debugfs {
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
int npa_qsize_id;
int nix_qsize_id;
};
@@ -296,6 +306,8 @@ struct hw_cap {
bool nix_shaping; /* Is shaping and coloring supported */
bool nix_tx_link_bp; /* Can link backpressure TL queues ? */
bool nix_rx_multicast; /* Rx packet replication support */
+ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
+ bool programmable_chans; /* Channels programmable ? */
};
struct rvu_hwinfo {
@@ -304,14 +316,20 @@ struct rvu_hwinfo {
u16 max_vfs_per_pf; /* Max VFs that can be attached to a PF */
u8 cgx;
u8 lmac_per_cgx;
+ u16 cgx_chan_base; /* CGX base channel number */
+ u16 lbk_chan_base; /* LBK base channel number */
+ u16 sdp_chan_base; /* SDP base channel number */
+ u16 cpt_chan_base; /* CPT base channel number */
u8 cgx_links;
u8 lbk_links;
u8 sdp_links;
+ u8 cpt_links; /* Number of CPT links */
u8 npc_kpus; /* No of parser units */
u8 npc_pkinds; /* No of port kinds */
u8 npc_intfs; /* No of interfaces */
u8 npc_kpu_entries; /* No of KPU entries */
u16 npc_counters; /* No of match stats counters */
+ u32 lbk_bufsize; /* FIFO size supported by LBK */
bool npc_ext_set; /* Extended register set */
struct hw_cap cap;
@@ -350,6 +368,10 @@ struct rvu_fwdata {
u64 msixtr_base;
#define FWDATA_RESERVED_MEM 1023
u64 reserved[FWDATA_RESERVED_MEM];
+#define CGX_MAX 5
+#define CGX_LMACS_MAX 4
+ struct cgx_lmac_fwdata_s cgx_fw_data[CGX_MAX][CGX_LMACS_MAX];
+ /* Do not add new fields below this line */
};
struct ptp;
@@ -465,6 +487,59 @@ static inline bool is_rvu_96xx_B0(struct rvu *rvu)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX);
}
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bit 3..2: major pass
+ * bits 7..4: midr id
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_LOKI 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+
+static inline bool is_rvu_otx2(struct rvu *rvu)
+{
+ struct pci_dev *pdev = rvu->pdev;
+
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM);
+}
+
+static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid,
+ u8 lmacid, u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 cgx_chans = nix_const & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_CGX_LMAC_CHX(cgxid, lmacid, chan);
+
+ return rvu->hw->cgx_chan_base +
+ (cgxid * hw->lmac_per_cgx + lmacid) * cgx_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
+ u8 chan)
+{
+ u64 nix_const = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST);
+ u16 lbk_chans = (nix_const >> 16) & 0xFFULL;
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return NIX_CHAN_LBK_CHX(lbkid, chan);
+
+ return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
+}
+
+static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
+{
+ return rvu->hw->cpt_chan_base + chan;
+}
+
/* Function Prototypes
* RVU
*/
@@ -601,6 +676,15 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
+u32 rvu_cgx_get_fifolen(struct rvu *rvu);
+
+/* CPT APIs */
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+
+/* CN10K RVU */
+int rvu_set_channels_base(struct rvu *rvu);
+void rvu_program_channels(struct rvu *rvu);
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 6c6b411e78fd..e668e482383a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -14,6 +14,7 @@
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
@@ -42,6 +43,20 @@ static struct _req_type __maybe_unused \
MBOX_UP_CGX_MESSAGES
#undef M
+bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
+{
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ return (cgx_features_get(cgxd) & feature);
+}
+
/* Returns bitmap of mapped PFs */
static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
@@ -92,9 +107,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
struct npc_pkind *pkind = &rvu->hw->pkind;
int cgx_cnt_max = rvu->cgx_cnt_max;
- int cgx, lmac_cnt, lmac;
int pf = PF_CGXMAP_BASE;
+ unsigned long lmac_bmap;
int size, free_pkind;
+ int cgx, lmac, iter;
if (!cgx_cnt_max)
return 0;
@@ -125,14 +141,17 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
if (!rvu_cgx_pdata(cgx, rvu))
continue;
- lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) {
+ lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
+ for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+ lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
+ iter);
rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
rvu->cgx_mapped_pfs++;
+ pf++;
}
}
return 0;
@@ -154,8 +173,10 @@ static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
&qentry->link_event.link_uinfo);
qentry->link_event.cgx_id = cgx_id;
qentry->link_event.lmac_id = lmac_id;
- if (err)
+ if (err) {
+ kfree(qentry);
goto skip_add;
+ }
list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
@@ -251,6 +272,7 @@ static void cgx_evhandler_task(struct work_struct *work)
static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
struct cgx_event_cb cb;
int cgx, lmac, err;
void *cgxd;
@@ -271,7 +293,8 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) {
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
err = cgx_lmac_evh_register(&cb, cgxd, lmac);
if (err)
dev_err(rvu->dev,
@@ -349,6 +372,7 @@ int rvu_cgx_init(struct rvu *rvu)
int rvu_cgx_exit(struct rvu *rvu)
{
+ unsigned long lmac_bmap;
int cgx, lmac;
void *cgxd;
@@ -356,7 +380,8 @@ int rvu_cgx_exit(struct rvu *rvu)
cgxd = rvu_cgx_pdata(cgx, rvu);
if (!cgxd)
continue;
- for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++)
+ lmac_bmap = cgx_get_lmac_bmap(cgxd);
+ for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
cgx_lmac_evh_unregister(cgxd, lmac);
}
@@ -381,6 +406,7 @@ static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -390,11 +416,12 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
/* Set / clear CTL_BCK to control pause frame forwarding to NIX */
if (enable)
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
else
- cgx_lmac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
+ mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
@@ -426,10 +453,11 @@ int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
return 0;
}
-int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
- struct cgx_stats_rsp *rsp)
+static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
+ void *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
int stat = 0, err = 0;
u64 tx_stat, rx_stat;
u8 cgx_idx, lmac;
@@ -440,28 +468,63 @@ int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
/* Rx stats */
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac, stat, &rx_stat);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
if (err)
return err;
- rsp->rx_stats[stat] = rx_stat;
+ if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
stat++;
}
/* Tx stats */
stat = 0;
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac, stat, &tx_stat);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
if (err)
return err;
- rsp->tx_stats[stat] = tx_stat;
+ if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
+ ((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
+ else
+ ((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
stat++;
}
return 0;
}
+int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
+ struct cgx_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
+ struct rpm_stats_rsp *rsp)
+{
+ return rvu_lmac_get_stats(rvu, req, (void *)rsp);
+}
+
+int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ return cgx_get_fec_stats(cgxd, lmac, rsp);
+}
+
int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@@ -538,6 +601,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
u8 cgx_id, lmac_id;
void *cgxd;
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
/* This msg is expected only from PFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
*/
@@ -624,17 +690,49 @@ int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
return err;
}
+int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_features_info_msg *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return 0;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->lmac_features = cgx_features_get(cgxd);
+
+ return 0;
+}
+
+u32 rvu_cgx_get_fifolen(struct rvu *rvu)
+{
+ struct mac_ops *mac_ops;
+ int rvu_def_cgx_id = 0;
+ u32 fifo_len;
+
+ mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+ fifo_len = mac_ops ? mac_ops->fifo_len : 0;
+
+ return fifo_len;
+}
+
static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
if (!is_cgx_config_permitted(rvu, pcifunc))
return -EPERM;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
- return cgx_lmac_internal_loopback(rvu_cgx_pdata(cgx_id, rvu),
+ return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, en);
}
@@ -657,7 +755,12 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
struct cgx_pause_frm_cfg *rsp)
{
int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
+ return 0;
/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
* if received from other PF/VF simply ACK, nothing to do.
@@ -666,16 +769,32 @@ int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
return -ENODEV;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
if (req->set)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- req->tx_pause, req->rx_pause);
+ mac_ops->mac_enadis_pause_frm(cgxd, lmac_id,
+ req->tx_pause, req->rx_pause);
else
- cgx_lmac_get_pause_frm(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
- &rsp->tx_pause, &rsp->rx_pause);
+ mac_ops->mac_get_pause_frm_status(cgxd, lmac_id,
+ &rsp->tx_pause,
+ &rsp->rx_pause);
return 0;
}
+int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
+}
+
/* Finds cumulative status of NIX rx/tx counters from LF of a PF and those
* from its VFs as well. ie. NIX rx/tx counters at the CGX port level
*/
@@ -767,3 +886,56 @@ exit:
mutex_unlock(&rvu->cgx_cfg_lock);
return err;
}
+
+int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
+ struct fec_mode *req,
+ struct fec_mode *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ if (req->fec == OTX2_FEC_OFF)
+ req->fec = OTX2_FEC_NONE;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
+ struct cgx_fw_data *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!rvu->fwdata)
+ return -ENXIO;
+
+ if (!is_pf_cgxmapped(rvu, pf))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
+ sizeof(struct cgx_lmac_fwdata_s));
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
+ struct cgx_set_link_mode_req *req,
+ struct cgx_set_link_mode_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
new file mode 100644
index 000000000000..7d9e71c6965f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell RPM CN10K driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/pci.h>
+#include "rvu.h"
+#include "cgx.h"
+#include "rvu_reg.h"
+
+int rvu_set_channels_base(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 cpt_chan_base;
+ u64 nix_const;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+
+ hw->cgx = (nix_const >> 12) & 0xFULL;
+ hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
+ hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
+ hw->lbk_links = (nix_const >> 24) & 0xFULL;
+ hw->cpt_links = (nix_const >> 44) & 0xFULL;
+ hw->sdp_links = 1;
+
+ hw->cgx_chan_base = NIX_CHAN_CGX_LMAC_CHX(0, 0, 0);
+ hw->lbk_chan_base = NIX_CHAN_LBK_CHX(0, 0);
+ hw->sdp_chan_base = NIX_CHAN_SDP_CH_START;
+
+ /* No Programmable channels */
+ if (!(nix_const & BIT_ULL(60)))
+ return 0;
+
+ hw->cap.programmable_chans = true;
+
+ /* If programmable channels are present then configure
+ * channels such that all channel numbers are contiguous
+ * leaving no holes. This way the new CPT channels can be
+ * accommodated. The order of channel numbers assigned is
+ * LBK, SDP, CGX and CPT.
+ */
+ hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links *
+ ((nix_const >> 16) & 0xFFULL);
+ hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS;
+
+ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links *
+ (nix_const & 0xFFULL);
+
+ /* Out of 4096 channels start CPT from 2048 so
+ * that MSB for CPT channels is always set
+ */
+ if (cpt_chan_base <= 0x800) {
+ hw->cpt_chan_base = 0x800;
+ } else {
+ dev_err(rvu->dev,
+ "CPT channels could not fit in the range 2048-4095\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define LBK_CONNECT_NIXX(a) (0x0 + (a))
+
+static void __rvu_lbk_set_chans(struct rvu *rvu, void __iomem *base,
+ u64 offset, int lbkid, u16 chans)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u64 cfg;
+
+ cfg = readq(base + offset);
+ cfg &= ~(LBK_LINK_CFG_RANGE_MASK |
+ LBK_LINK_CFG_ID_MASK | LBK_LINK_CFG_BASE_MASK);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_RANGE_MASK, ilog2(chans));
+ cfg |= FIELD_PREP(LBK_LINK_CFG_ID_MASK, lbkid);
+ cfg |= FIELD_PREP(LBK_LINK_CFG_BASE_MASK, hw->lbk_chan_base);
+
+ writeq(cfg, base + offset);
+}
+
+static void rvu_lbk_set_channels(struct rvu *rvu)
+{
+ struct pci_dev *pdev = NULL;
+ void __iomem *base;
+ u64 lbk_const;
+ u8 src, dst;
+ u16 chans;
+
+ /* To loopback packets between multiple NIX blocks
+ * multiple LBK blocks are needed. With two NIX blocks,
+ * four LBK blocks are needed and each LBK block
+ * source and destination are as follows:
+ * LBK0 - source NIX0 and destination NIX0
+ * LBK1 - source NIX0 and destination NIX1
+ * LBK2 - source NIX1 and destination NIX0
+ * LBK3 - source NIX1 and destination NIX1
+ * As per the HRM channel numbers should be programmed as:
+ * P2X and X2P of LBK0 as same
+ * P2X and X2P of LBK3 as same
+ * P2X of LBK1 and X2P of LBK2 as same
+ * P2X of LBK2 and X2P of LBK1 as same
+ */
+ while (true) {
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_LBK, pdev);
+ if (!pdev)
+ return;
+
+ base = pci_ioremap_bar(pdev, 0);
+ if (!base)
+ goto err_put;
+
+ lbk_const = readq(base + LBK_CONST);
+ chans = FIELD_GET(LBK_CONST_CHANS, lbk_const);
+ dst = FIELD_GET(LBK_CONST_DST, lbk_const);
+ src = FIELD_GET(LBK_CONST_SRC, lbk_const);
+
+ if (src == dst) {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK0 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK3 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ }
+ } else {
+ if (src == LBK_CONNECT_NIXX(0)) { /* LBK1 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 0, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 1, chans);
+ } else if (src == LBK_CONNECT_NIXX(1)) { /* LBK2 */
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_X2P,
+ 1, chans);
+ __rvu_lbk_set_chans(rvu, base, LBK_LINK_CFG_P2X,
+ 0, chans);
+ }
+ }
+ iounmap(base);
+ }
+err_put:
+ pci_dev_put(pdev);
+}
+
+static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
+{
+ u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int link, nix_link = 0;
+ u16 start;
+ u64 cfg;
+
+ cgx_chans = nix_const & 0xFFULL;
+ lbk_chans = (nix_const >> 16) & 0xFFULL;
+ sdp_chans = SDP_CHANNELS;
+ cpt_chans = (nix_const >> 32) & 0xFFFULL;
+
+ start = hw->cgx_chan_base;
+ for (link = 0; link < hw->cgx_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cgx_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cgx_chans;
+ }
+
+ start = hw->lbk_chan_base;
+ for (link = 0; link < hw->lbk_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(lbk_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += lbk_chans;
+ }
+
+ start = hw->sdp_chan_base;
+ for (link = 0; link < hw->sdp_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(sdp_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += sdp_chans;
+ }
+
+ start = hw->cpt_chan_base;
+ for (link = 0; link < hw->cpt_links; link++, nix_link++) {
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link));
+ cfg &= ~(NIX_AF_LINKX_BASE_MASK | NIX_AF_LINKX_RANGE_MASK);
+ cfg |= FIELD_PREP(NIX_AF_LINKX_RANGE_MASK, ilog2(cpt_chans));
+ cfg |= FIELD_PREP(NIX_AF_LINKX_BASE_MASK, start);
+ rvu_write64(rvu, blkaddr, NIX_AF_LINKX_CFG(nix_link), cfg);
+ start += cpt_chans;
+ }
+}
+
+static void rvu_nix_set_channels(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ while (blkaddr) {
+ __rvu_nix_set_channels(rvu, blkaddr);
+ blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
+ }
+}
+
+static void __rvu_rpm_set_channels(int cgxid, int lmacid, u16 base)
+{
+ u64 cfg;
+
+ cfg = cgx_lmac_read(cgxid, lmacid, RPMX_CMRX_LINK_CFG);
+ cfg &= ~(RPMX_CMRX_LINK_BASE_MASK | RPMX_CMRX_LINK_RANGE_MASK);
+
+ /* There is no read-only constant register advertising the
+ * per-LMAC channel count; it is fixed at 16.
+ */
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(RPMX_CMRX_LINK_BASE_MASK, base);
+ cgx_lmac_write(cgxid, lmacid, RPMX_CMRX_LINK_CFG, cfg);
+}
+
+static void rvu_rpm_set_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 base = hw->cgx_chan_base;
+ int cgx, lmac;
+
+ for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) {
+ for (lmac = 0; lmac < hw->lmac_per_cgx; lmac++) {
+ __rvu_rpm_set_channels(cgx, lmac, base);
+ base += 16;
+ }
+ }
+}
+
+void rvu_program_channels(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+
+ if (!hw->cap.programmable_chans)
+ return;
+
+ rvu_nix_set_channels(rvu);
+ rvu_lbk_set_channels(rvu);
+ rvu_rpm_set_channels(rvu);
+}
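
To make the contiguous channel layout concrete, here is a worked sketch of what rvu_set_channels_base() computes for one assumed configuration (16 CGX channels per LMAC, 64 LBK channels per link, 1 LBK link, 8 CGX links, and SDP_CHANNELS taken as 256 — all illustrative values, not read from hardware):

/* Worked sketch, assumed values only */
static u16 example_cpt_chan_base(void)
{
	u16 lbk_chan_base = 0;                       /* NIX_CHAN_LBK_CHX(0, 0), taken as 0  */
	u16 sdp_chan_base = lbk_chan_base + 1 * 64;  /* = 64,  after 1 LBK link x 64 chans  */
	u16 cgx_chan_base = sdp_chan_base + 1 * 256; /* = 320, after 1 SDP link x 256 chans */
	u16 cpt_chan_base = cgx_chan_base + 8 * 16;  /* = 448, after 8 CGX links x 16 chans */

	/* 448 <= 0x800, so the base moves up to 2048 to keep the MSB of
	 * every CPT channel number set (448..2047 stay unused); had the
	 * sum exceeded 0x800, rvu_set_channels_base() would return -EINVAL.
	 */
	if (cpt_chan_base <= 0x800)
		cpt_chan_base = 0x800;

	return cpt_chan_base;
}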
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 35261d52c997..0945c3a3b180 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -65,13 +65,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
int num_lfs, slot;
u64 val;
+ blkaddr = req->blkaddr ? req->blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
+
if (req->eng_grpmsk == 0x0)
return CPT_AF_ERR_GRP_INVALID;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return blkaddr;
-
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
block->addr);
@@ -114,23 +114,17 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
return 0;
}
-int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
- struct msg_rsp *rsp)
+static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
u16 pcifunc = req->hdr.pcifunc;
+ int num_lfs, cptlf, slot;
struct rvu_block *block;
- int cptlf, blkaddr;
- int num_lfs, slot;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return blkaddr;
block = &rvu->hw->block[blkaddr];
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
block->addr);
if (!num_lfs)
- return CPT_AF_ERR_LF_INVALID;
+ return 0;
for (slot = 0; slot < num_lfs; slot++) {
cptlf = rvu_get_lf(rvu, block, pcifunc, slot);
@@ -146,6 +140,21 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int ret;
+
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ ret = cpt_lf_free(rvu, req, BLKADDR_CPT1);
+
+ return ret;
+}
+
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
u64 offset = req->reg_offset;
@@ -208,9 +217,9 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
{
int blkaddr;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return blkaddr;
+ blkaddr = req->blkaddr ? req->blkaddr : BLKADDR_CPT0;
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -ENODEV;
/* This message is accepted only if sent from CPT PF/VF */
if (!is_cpt_pf(rvu, req->hdr.pcifunc) &&
@@ -231,3 +240,92 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
return 0;
}
+
+#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
+#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
+#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
+#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+
+static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
+{
+ int i = 0, hard_lp_ctr = 100000;
+ u64 inprog, grp_ptr;
+ u16 nq_ptr, dq_ptr;
+
+ /* Disable instructions enqueuing */
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
+
+ /* Disable executions in the LF's queue */
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ inprog &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
+
+ /* Wait for CPT queue to become execution-quiescent */
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ if (INPROG_GRB_PARTIAL(inprog)) {
+ i = 0;
+ hard_lp_ctr--;
+ } else {
+ i++;
+ }
+
+ grp_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot,
+ CPT_LF_Q_GRP_PTR));
+ nq_ptr = (grp_ptr >> 32) & 0x7FFF;
+ dq_ptr = grp_ptr & 0x7FFF;
+
+ } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+
+ i = 0;
+ hard_lp_ctr = 100000;
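+ /* Second wait: treat the queue as quiescent only after ten
+ * consecutive polls see no inflight instructions and settled
+ * GRB/GWB values; any other reading resets the streak and
+ * burns the hard loop counter.
+ */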
+ do {
+ inprog = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+
+ if ((INPROG_INFLIGHT(inprog) == 0) &&
+ (INPROG_GWB(inprog) < 40) &&
+ ((INPROG_GRB(inprog) == 0) ||
+ (INPROG_GRB((inprog)) == 40))) {
+ i++;
+ } else {
+ i = 0;
+ hard_lp_ctr--;
+ }
+ } while (hard_lp_ctr && (i < 10));
+
+ if (hard_lp_ctr == 0)
+ dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+}
+
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+{
+ int blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
+ if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
+ return -EINVAL;
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ cpt_lf_disable_iqueue(rvu, blkaddr, slot);
+
+ /* Set group drop to help clear out hardware */
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
+ reg |= BIT_ULL(17);
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index d27543c1a166..094124b695dc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -19,6 +19,7 @@
#include "rvu_reg.h"
#include "rvu.h"
#include "cgx.h"
+#include "lmac_common.h"
#include "npc.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -109,6 +110,89 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT17] = "Control/PAUSE packets sent",
};
+static char *rpm_rx_stats_fields[] = {
+ "Octets of received packets",
+ "Octets of received packets with out error",
+ "Received packets with alignment errors",
+ "Control/PAUSE packets received",
+ "Packets received with Frame too long Errors",
+ "Packets received with a1nrange length Errors",
+ "Received packets",
+ "Packets received with FrameCheckSequenceErrors",
+ "Packets received with VLAN header",
+ "Error packets",
+ "Packets received with unicast DMAC",
+ "Packets received with multicast DMAC",
+ "Packets received with broadcast DMAC",
+ "Dropped packets",
+ "Total frames received on interface",
+ "Packets received with an octet count < 64",
+ "Packets received with an octet count == 64",
+ "Packets received with an octet count of 65–127",
+ "Packets received with an octet count of 128-255",
+ "Packets received with an octet count of 256-511",
+ "Packets received with an octet count of 512-1023",
+ "Packets received with an octet count of 1024-1518",
+ "Packets received with an octet count of > 1518",
+ "Oversized Packets",
+ "Jabber Packets",
+ "Fragmented Packets",
+ "CBFC(class based flow control) pause frames received for class 0",
+ "CBFC pause frames received for class 1",
+ "CBFC pause frames received for class 2",
+ "CBFC pause frames received for class 3",
+ "CBFC pause frames received for class 4",
+ "CBFC pause frames received for class 5",
+ "CBFC pause frames received for class 6",
+ "CBFC pause frames received for class 7",
+ "CBFC pause frames received for class 8",
+ "CBFC pause frames received for class 9",
+ "CBFC pause frames received for class 10",
+ "CBFC pause frames received for class 11",
+ "CBFC pause frames received for class 12",
+ "CBFC pause frames received for class 13",
+ "CBFC pause frames received for class 14",
+ "CBFC pause frames received for class 15",
+ "MAC control packets received",
+};
+
+static char *rpm_tx_stats_fields[] = {
+ "Total octets sent on the interface",
+ "Total octets transmitted OK",
+ "Control/Pause frames sent",
+ "Total frames transmitted OK",
+ "Total frames sent with VLAN header",
+ "Error Packets",
+ "Packets sent to unicast DMAC",
+ "Packets sent to the multicast DMAC",
+ "Packets sent to a broadcast DMAC",
+ "Packets sent with an octet count == 64",
+ "Packets sent with an octet count of 65–127",
+ "Packets sent with an octet count of 128-255",
+ "Packets sent with an octet count of 256-511",
+ "Packets sent with an octet count of 512-1023",
+ "Packets sent with an octet count of 1024-1518",
+ "Packets sent with an octet count of > 1518",
+ "CBFC(class based flow control) pause frames transmitted for class 0",
+ "CBFC pause frames transmitted for class 1",
+ "CBFC pause frames transmitted for class 2",
+ "CBFC pause frames transmitted for class 3",
+ "CBFC pause frames transmitted for class 4",
+ "CBFC pause frames transmitted for class 5",
+ "CBFC pause frames transmitted for class 6",
+ "CBFC pause frames transmitted for class 7",
+ "CBFC pause frames transmitted for class 8",
+ "CBFC pause frames transmitted for class 9",
+ "CBFC pause frames transmitted for class 10",
+ "CBFC pause frames transmitted for class 11",
+ "CBFC pause frames transmitted for class 12",
+ "CBFC pause frames transmitted for class 13",
+ "CBFC pause frames transmitted for class 14",
+ "CBFC pause frames transmitted for class 15",
+ "MAC control packets sent",
+ "Total frames sent on the interface"
+};
+
enum cpt_eng_type {
CPT_AE_TYPE = 1,
CPT_SE_TYPE = 2,
@@ -234,6 +318,8 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
{
struct rvu *rvu = filp->private;
struct pci_dev *pdev = NULL;
+ struct mac_ops *mac_ops;
+ int rvu_def_cgx_id = 0;
char cgx[10], lmac[10];
struct rvu_pfvf *pfvf;
int pf, domain, blkid;
@@ -241,7 +327,9 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
u16 pcifunc;
domain = 2;
- seq_puts(filp, "PCI dev\t\tRVU PF Func\tNIX block\tCGX\tLMAC\n");
+ mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+ seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
+ mac_ops->name);
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
if (!is_pf_cgxmapped(rvu, pf))
continue;
@@ -262,7 +350,7 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id,
&lmac_id);
- sprintf(cgx, "CGX%d", cgx_id);
+ sprintf(cgx, "%s%d", mac_ops->name, cgx_id);
sprintf(lmac, "LMAC%d", lmac_id);
seq_printf(filp, "%s\t0x%x\t\tNIX%d\t\t%s\t%s\n",
dev_name(&pdev->dev), pcifunc, blkid, cgx, lmac);
@@ -449,6 +537,7 @@ RVU_DEBUG_SEQ_FOPS(npa_qsize, npa_qsize_display, npa_qsize_write);
static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_aura_s *aura = &rsp->aura;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Pool addr\t\t%llx\n", aura->pool_addr);
@@ -468,6 +557,9 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W3: limit\t\t%llu\nW3: bp\t\t\t%d\nW3: fc_ena\t\t%d\n",
(u64)aura->limit, aura->bp, aura->fc_ena);
+
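+ /* fc_be exists only on newer (non-OcteonTx2) silicon */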
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W3: fc_be\t\t%d\n", aura->fc_be);
seq_printf(m, "W3: fc_up_crossing\t%d\nW3: fc_stype\t\t%d\n",
aura->fc_up_crossing, aura->fc_stype);
seq_printf(m, "W3: fc_hyst_bits\t%d\n", aura->fc_hyst_bits);
@@ -485,12 +577,15 @@ static void print_npa_aura_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W5: err_qint_idx \t%d\n", aura->err_qint_idx);
seq_printf(m, "W6: thresh\t\t%llu\n", (u64)aura->thresh);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W6: fc_msh_dst\t\t%d\n", aura->fc_msh_dst);
}
/* Dumps given NPA Pool's context */
static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
{
struct npa_pool_s *pool = &rsp->pool;
+ struct rvu *rvu = m->private;
seq_printf(m, "W0: Stack base\t\t%llx\n", pool->stack_base);
@@ -512,6 +607,8 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
pool->avg_con, pool->fc_ena, pool->fc_stype);
seq_printf(m, "W4: fc_hyst_bits\t%d\nW4: fc_up_crossing\t%d\n",
pool->fc_hyst_bits, pool->fc_up_crossing);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W4: fc_be\t\t%d\n", pool->fc_be);
seq_printf(m, "W4: update_time\t\t%d\n", pool->update_time);
seq_printf(m, "W5: fc_addr\t\t%llx\n", pool->fc_addr);
@@ -525,8 +622,10 @@ static void print_npa_pool_ctx(struct seq_file *m, struct npa_aq_enq_rsp *rsp)
seq_printf(m, "W8: thresh_int\t\t%d\n", pool->thresh_int);
seq_printf(m, "W8: thresh_int_ena\t%d\nW8: thresh_up\t\t%d\n",
pool->thresh_int_ena, pool->thresh_up);
- seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t\t%d\n",
+ seq_printf(m, "W8: thresh_qint_idx\t%d\nW8: err_qint_idx\t%d\n",
pool->thresh_qint_idx, pool->err_qint_idx);
+ if (!is_rvu_otx2(rvu))
+ seq_printf(m, "W8: fc_msh_dst\t\t%d\n", pool->fc_msh_dst);
}
/* Reads aura/pool's ctx from admin queue */
@@ -910,11 +1009,78 @@ static int rvu_dbg_nix_ndc_tx_hits_miss_display(struct seq_file *filp,
RVU_DEBUG_SEQ_FOPS(nix_ndc_tx_hits_miss, nix_ndc_tx_hits_miss_display, NULL);
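+/* Dumps given CN10K nix_sq's context */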
+static void print_nix_cn10k_sq_ctx(struct seq_file *m,
+ struct nix_cn10k_sq_ctx_s *sq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: qint_idx \t\t\t%d\n",
+ sq_ctx->ena, sq_ctx->qint_idx);
+ seq_printf(m, "W0: substream \t\t\t0x%03x\nW0: sdp_mcast \t\t\t%d\n",
+ sq_ctx->substream, sq_ctx->sdp_mcast);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: sqe_way_mask \t\t%d\n\n",
+ sq_ctx->cq, sq_ctx->sqe_way_mask);
+
+ seq_printf(m, "W1: smq \t\t\t%d\nW1: cq_ena \t\t\t%d\nW1: xoff\t\t\t%d\n",
+ sq_ctx->smq, sq_ctx->cq_ena, sq_ctx->xoff);
+ seq_printf(m, "W1: sso_ena \t\t\t%d\nW1: smq_rr_weight\t\t%d\n",
+ sq_ctx->sso_ena, sq_ctx->smq_rr_weight);
+ seq_printf(m, "W1: default_chan\t\t%d\nW1: sqb_count\t\t\t%d\n\n",
+ sq_ctx->default_chan, sq_ctx->sqb_count);
+
+ seq_printf(m, "W2: smq_rr_count_lb \t\t%d\n", sq_ctx->smq_rr_count_lb);
+ seq_printf(m, "W2: smq_rr_count_ub \t\t%d\n", sq_ctx->smq_rr_count_ub);
+ seq_printf(m, "W2: sqb_aura \t\t\t%d\nW2: sq_int \t\t\t%d\n",
+ sq_ctx->sqb_aura, sq_ctx->sq_int);
+ seq_printf(m, "W2: sq_int_ena \t\t\t%d\nW2: sqe_stype \t\t\t%d\n",
+ sq_ctx->sq_int_ena, sq_ctx->sqe_stype);
+
+ seq_printf(m, "W3: max_sqe_size\t\t%d\nW3: cq_limit\t\t\t%d\n",
+ sq_ctx->max_sqe_size, sq_ctx->cq_limit);
+ seq_printf(m, "W3: lmt_dis \t\t\t%d\nW3: mnq_dis \t\t\t%d\n",
+ sq_ctx->mnq_dis, sq_ctx->lmt_dis);
+ seq_printf(m, "W3: smq_next_sq\t\t\t%d\nW3: smq_lso_segnum\t\t%d\n",
+ sq_ctx->smq_next_sq, sq_ctx->smq_lso_segnum);
+ seq_printf(m, "W3: tail_offset \t\t%d\nW3: smenq_offset\t\t%d\n",
+ sq_ctx->tail_offset, sq_ctx->smenq_offset);
+ seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
+ sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+
+ seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
+ seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
+ seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
+ seq_printf(m, "W7: smenq_next_sqb \t\t%llx\n\n",
+ sq_ctx->smenq_next_sqb);
+
+ seq_printf(m, "W8: head_sqb\t\t\t%llx\n\n", sq_ctx->head_sqb);
+
+ seq_printf(m, "W9: vfi_lso_total\t\t%d\n", sq_ctx->vfi_lso_total);
+ seq_printf(m, "W9: vfi_lso_sizem1\t\t%d\nW9: vfi_lso_sb\t\t\t%d\n",
+ sq_ctx->vfi_lso_sizem1, sq_ctx->vfi_lso_sb);
+ seq_printf(m, "W9: vfi_lso_mps\t\t\t%d\nW9: vfi_lso_vlan0_ins_ena\t%d\n",
+ sq_ctx->vfi_lso_mps, sq_ctx->vfi_lso_vlan0_ins_ena);
+ seq_printf(m, "W9: vfi_lso_vlan1_ins_ena\t%d\nW9: vfi_lso_vld \t\t%d\n\n",
+ sq_ctx->vfi_lso_vld, sq_ctx->vfi_lso_vlan1_ins_ena);
+
+ seq_printf(m, "W10: scm_lso_rem \t\t%llu\n\n",
+ (u64)sq_ctx->scm_lso_rem);
+ seq_printf(m, "W11: octs \t\t\t%llu\n\n", (u64)sq_ctx->octs);
+ seq_printf(m, "W12: pkts \t\t\t%llu\n\n", (u64)sq_ctx->pkts);
+ seq_printf(m, "W14: dropped_octs \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_octs);
+ seq_printf(m, "W15: dropped_pkts \t\t%llu\n\n",
+ (u64)sq_ctx->dropped_pkts);
+}
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_sq_ctx(m, (struct nix_cn10k_sq_ctx_s *)sq_ctx);
+ return;
+ }
seq_printf(m, "W0: sqe_way_mask \t\t%d\nW0: cq \t\t\t\t%d\n",
sq_ctx->sqe_way_mask, sq_ctx->cq);
seq_printf(m, "W0: sdp_mcast \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
@@ -974,10 +1140,94 @@ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
(u64)sq_ctx->dropped_pkts);
}
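+/* Dumps given CN10K nix_rq's context */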
+static void print_nix_cn10k_rq_ctx(struct seq_file *m,
+ struct nix_cn10k_rq_ctx_s *rq_ctx)
+{
+ seq_printf(m, "W0: ena \t\t\t%d\nW0: sso_ena \t\t\t%d\n",
+ rq_ctx->ena, rq_ctx->sso_ena);
+ seq_printf(m, "W0: ipsech_ena \t\t\t%d\nW0: ena_wqwd \t\t\t%d\n",
+ rq_ctx->ipsech_ena, rq_ctx->ena_wqwd);
+ seq_printf(m, "W0: cq \t\t\t\t%d\nW0: lenerr_dis \t\t\t%d\n",
+ rq_ctx->cq, rq_ctx->lenerr_dis);
+ seq_printf(m, "W0: csum_il4_dis \t\t%d\nW0: csum_ol4_dis \t\t%d\n",
+ rq_ctx->csum_il4_dis, rq_ctx->csum_ol4_dis);
+ seq_printf(m, "W0: len_il4_dis \t\t%d\nW0: len_il3_dis \t\t%d\n",
+ rq_ctx->len_il4_dis, rq_ctx->len_il3_dis);
+ seq_printf(m, "W0: len_ol4_dis \t\t%d\nW0: len_ol3_dis \t\t%d\n",
+ rq_ctx->len_ol4_dis, rq_ctx->len_ol3_dis);
+ seq_printf(m, "W0: wqe_aura \t\t\t%d\n\n", rq_ctx->wqe_aura);
+
+ seq_printf(m, "W1: spb_aura \t\t\t%d\nW1: lpb_aura \t\t\t%d\n",
+ rq_ctx->spb_aura, rq_ctx->lpb_aura);
+ seq_printf(m, "W1: spb_aura \t\t\t%d\n", rq_ctx->spb_aura);
+ seq_printf(m, "W1: sso_grp \t\t\t%d\nW1: sso_tt \t\t\t%d\n",
+ rq_ctx->sso_grp, rq_ctx->sso_tt);
+ seq_printf(m, "W1: pb_caching \t\t\t%d\nW1: wqe_caching \t\t%d\n",
+ rq_ctx->pb_caching, rq_ctx->wqe_caching);
+ seq_printf(m, "W1: xqe_drop_ena \t\t%d\nW1: spb_drop_ena \t\t%d\n",
+ rq_ctx->xqe_drop_ena, rq_ctx->spb_drop_ena);
+ seq_printf(m, "W1: lpb_drop_ena \t\t%d\nW1: pb_stashing \t\t%d\n",
+ rq_ctx->lpb_drop_ena, rq_ctx->pb_stashing);
+ seq_printf(m, "W1: ipsecd_drop_ena \t\t%d\nW1: chi_ena \t\t\t%d\n\n",
+ rq_ctx->ipsecd_drop_ena, rq_ctx->chi_ena);
+
+ seq_printf(m, "W2: band_prof_id \t\t%d\n", rq_ctx->band_prof_id);
+ seq_printf(m, "W2: policer_ena \t\t%d\n", rq_ctx->policer_ena);
+ seq_printf(m, "W2: spb_sizem1 \t\t\t%d\n", rq_ctx->spb_sizem1);
+ seq_printf(m, "W2: wqe_skip \t\t\t%d\nW2: sqb_ena \t\t\t%d\n",
+ rq_ctx->wqe_skip, rq_ctx->spb_ena);
+ seq_printf(m, "W2: lpb_size1 \t\t\t%d\nW2: first_skip \t\t\t%d\n",
+ rq_ctx->lpb_sizem1, rq_ctx->first_skip);
+ seq_printf(m, "W2: later_skip\t\t\t%d\nW2: xqe_imm_size\t\t%d\n",
+ rq_ctx->later_skip, rq_ctx->xqe_imm_size);
+ seq_printf(m, "W2: xqe_imm_copy \t\t%d\nW2: xqe_hdr_split \t\t%d\n\n",
+ rq_ctx->xqe_imm_copy, rq_ctx->xqe_hdr_split);
+
+ seq_printf(m, "W3: xqe_drop \t\t\t%d\nW3: xqe_pass \t\t\t%d\n",
+ rq_ctx->xqe_drop, rq_ctx->xqe_pass);
+ seq_printf(m, "W3: wqe_pool_drop \t\t%d\nW3: wqe_pool_pass \t\t%d\n",
+ rq_ctx->wqe_pool_drop, rq_ctx->wqe_pool_pass);
+ seq_printf(m, "W3: spb_pool_drop \t\t%d\nW3: spb_pool_pass \t\t%d\n",
+ rq_ctx->spb_pool_drop, rq_ctx->spb_pool_pass);
+ seq_printf(m, "W3: spb_aura_drop \t\t%d\nW3: spb_aura_pass \t\t%d\n\n",
+ rq_ctx->spb_aura_pass, rq_ctx->spb_aura_drop);
+
+ seq_printf(m, "W4: lpb_aura_drop \t\t%d\nW3: lpb_aura_pass \t\t%d\n",
+ rq_ctx->lpb_aura_pass, rq_ctx->lpb_aura_drop);
+ seq_printf(m, "W4: lpb_pool_drop \t\t%d\nW3: lpb_pool_pass \t\t%d\n",
+ rq_ctx->lpb_pool_drop, rq_ctx->lpb_pool_pass);
+ seq_printf(m, "W4: rq_int \t\t\t%d\nW4: rq_int_ena\t\t\t%d\n",
+ rq_ctx->rq_int, rq_ctx->rq_int_ena);
+ seq_printf(m, "W4: qint_idx \t\t\t%d\n\n", rq_ctx->qint_idx);
+
+ seq_printf(m, "W5: ltag \t\t\t%d\nW5: good_utag \t\t\t%d\n",
+ rq_ctx->ltag, rq_ctx->good_utag);
+ seq_printf(m, "W5: bad_utag \t\t\t%d\nW5: flow_tagw \t\t\t%d\n",
+ rq_ctx->bad_utag, rq_ctx->flow_tagw);
+ seq_printf(m, "W5: ipsec_vwqe \t\t\t%d\nW5: vwqe_ena \t\t\t%d\n",
+ rq_ctx->ipsec_vwqe, rq_ctx->vwqe_ena);
+ seq_printf(m, "W5: vwqe_wait \t\t\t%d\nW5: max_vsize_exp\t\t%d\n",
+ rq_ctx->vwqe_wait, rq_ctx->max_vsize_exp);
+ seq_printf(m, "W5: vwqe_skip \t\t\t%d\n\n", rq_ctx->vwqe_skip);
+
+ seq_printf(m, "W6: octs \t\t\t%llu\n\n", (u64)rq_ctx->octs);
+ seq_printf(m, "W7: pkts \t\t\t%llu\n\n", (u64)rq_ctx->pkts);
+ seq_printf(m, "W8: drop_octs \t\t\t%llu\n\n", (u64)rq_ctx->drop_octs);
+ seq_printf(m, "W9: drop_pkts \t\t\t%llu\n\n", (u64)rq_ctx->drop_pkts);
+ seq_printf(m, "W10: re_pkts \t\t\t%llu\n", (u64)rq_ctx->re_pkts);
+}
+
/* Dumps given nix_rq's context */
static void print_nix_rq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
struct nix_rq_ctx_s *rq_ctx = &rsp->rq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+
+ if (!is_rvu_otx2(rvu)) {
+ print_nix_cn10k_rq_ctx(m, (struct nix_cn10k_rq_ctx_s *)rq_ctx);
+ return;
+ }
seq_printf(m, "W0: wqe_aura \t\t\t%d\nW0: substream \t\t\t0x%03x\n",
rq_ctx->wqe_aura, rq_ctx->substream);
@@ -1439,6 +1689,7 @@ static void rvu_dbg_npa_init(struct rvu *rvu)
static int cgx_print_stats(struct seq_file *s, int lmac_id)
{
struct cgx_link_user_info linfo;
+ struct mac_ops *mac_ops;
void *cgxd = s->private;
u64 ucast, mcast, bcast;
int stat = 0, err = 0;
@@ -1450,6 +1701,11 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
if (!rvu)
return -ENODEV;
+ mac_ops = get_mac_ops(cgxd);
+
+ if (!mac_ops)
+ return 0;
+
/* Link status */
seq_puts(s, "\n=======Link Status======\n\n");
err = cgx_get_link_info(cgxd, lmac_id, &linfo);
@@ -1459,7 +1715,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
linfo.link_up ? "UP" : "DOWN", linfo.speed);
/* Rx stats */
- seq_puts(s, "\n=======NIX RX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX RX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXRX_STATUS(RX_UCAST, "rx_ucast_frames");
if (err)
return err;
@@ -1481,7 +1738,8 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Tx stats */
- seq_puts(s, "\n=======NIX TX_STATS(CGX port level)======\n\n");
+ seq_printf(s, "\n=======NIX TX_STATS(%s port level)======\n\n",
+ mac_ops->name);
ucast = PRINT_CGX_CUML_NIXTX_STATUS(TX_UCAST, "tx_ucast_frames");
if (err)
return err;
@@ -1500,24 +1758,35 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
/* Rx stats */
- seq_puts(s, "\n=======CGX RX_STATS======\n\n");
- while (stat < CGX_RX_STATS_COUNT) {
- err = cgx_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
+ seq_printf(s, "\n=======%s RX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->rx_stats_cnt) {
+ err = mac_ops->mac_get_rx_stats(cgxd, lmac_id, stat, &rx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat], rx_stat);
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_rx_stats_fields[stat],
+ rx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_rx_stats_fields[stat],
+ rx_stat);
stat++;
}
/* Tx stats */
stat = 0;
- seq_puts(s, "\n=======CGX TX_STATS======\n\n");
- while (stat < CGX_TX_STATS_COUNT) {
- err = cgx_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
+ seq_printf(s, "\n=======%s TX_STATS======\n\n", mac_ops->name);
+ while (stat < mac_ops->tx_stats_cnt) {
+ err = mac_ops->mac_get_tx_stats(cgxd, lmac_id, stat, &tx_stat);
if (err)
return err;
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat], tx_stat);
- stat++;
+
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
+ stat++;
}
return err;
@@ -1547,21 +1816,34 @@ RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
+ struct mac_ops *mac_ops;
+ unsigned long lmac_bmap;
+ int rvu_def_cgx_id = 0;
int i, lmac_id;
char dname[20];
void *cgx;
- rvu->rvu_dbg.cgx_root = debugfs_create_dir("cgx", rvu->rvu_dbg.root);
+ if (!cgx_get_cgxcnt_max())
+ return;
+
+ mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+ if (!mac_ops)
+ return;
+
+ rvu->rvu_dbg.cgx_root = debugfs_create_dir(mac_ops->name,
+ rvu->rvu_dbg.root);
for (i = 0; i < cgx_get_cgxcnt_max(); i++) {
cgx = rvu_cgx_pdata(i, rvu);
if (!cgx)
continue;
+ lmac_bmap = cgx_get_lmac_bmap(cgx);
/* cgx debugfs dir */
- sprintf(dname, "cgx%d", i);
+ sprintf(dname, "%s%d", mac_ops->name, i);
rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
rvu->rvu_dbg.cgx_root);
- for (lmac_id = 0; lmac_id < cgx_get_lmac_cnt(cgx); lmac_id++) {
+
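+ /* Create debugfs dirs only for LMACs enabled in this MAC block's bitmap */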
+ for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
/* lmac debugfs dir */
sprintf(dname, "lmac%d", lmac_id);
rvu->rvu_dbg.lmac =
@@ -1757,6 +2039,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s,
seq_printf(s, "mask 0x%x\n", ntohs(rule->mask.dport));
break;
default:
+ seq_puts(s, "\n");
break;
}
}
@@ -1785,7 +2068,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
} else {
switch (rule->rx_action.op) {
case NIX_RX_ACTIONOP_DROP:
@@ -1806,7 +2089,7 @@ static void rvu_dbg_npc_mcam_show_action(struct seq_file *s,
break;
default:
break;
- };
+ }
}
}
@@ -1903,20 +2186,16 @@ static void rvu_dbg_npc_init(struct rvu *rvu)
&rvu_dbg_npc_rx_miss_act_fops);
}
-/* CPT debugfs APIs */
static int cpt_eng_sts_display(struct seq_file *filp, u8 eng_type)
{
- struct rvu *rvu = filp->private;
+ struct cpt_ctx *ctx = filp->private;
u64 busy_sts = 0, free_sts = 0;
u32 e_min = 0, e_max = 0, e, i;
u16 max_ses, max_ies, max_aes;
- int blkaddr;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
max_ses = reg & 0xffff;
max_ies = (reg >> 16) & 0xffff;
@@ -1976,16 +2255,13 @@ RVU_DEBUG_SEQ_FOPS(cpt_ie_sts, cpt_ie_sts_display, NULL);
static int rvu_dbg_cpt_engines_info_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu = filp->private;
+ struct cpt_ctx *ctx = filp->private;
u16 max_ses, max_ies, max_aes;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u32 e_max, e;
- int blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS1);
max_ses = reg & 0xffff;
max_ies = (reg >> 16) & 0xffff;
@@ -2013,17 +2289,15 @@ RVU_DEBUG_SEQ_FOPS(cpt_engines_info, cpt_engines_info_display, NULL);
static int rvu_dbg_cpt_lfs_info_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu = filp->private;
- struct rvu_hwinfo *hw = rvu->hw;
+ struct cpt_ctx *ctx = filp->private;
+ int blkaddr = ctx->blkaddr;
+ struct rvu *rvu = ctx->rvu;
struct rvu_block *block;
- int blkaddr;
+ struct rvu_hwinfo *hw;
u64 reg;
u32 lf;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
+ hw = rvu->hw;
block = &hw->block[blkaddr];
if (!block->lf.bmap)
return -ENODEV;
@@ -2048,13 +2322,10 @@ RVU_DEBUG_SEQ_FOPS(cpt_lfs_info, cpt_lfs_info_display, NULL);
static int rvu_dbg_cpt_err_info_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu = filp->private;
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u64 reg0, reg1;
- int blkaddr;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
@@ -2078,15 +2349,11 @@ RVU_DEBUG_SEQ_FOPS(cpt_err_info, cpt_err_info_display, NULL);
static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
{
- struct rvu *rvu;
- int blkaddr;
+ struct cpt_ctx *ctx = filp->private;
+ struct rvu *rvu = ctx->rvu;
+ int blkaddr = ctx->blkaddr;
u64 reg;
- rvu = filp->private;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, 0);
- if (blkaddr < 0)
- return -ENODEV;
-
reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_REQ_PC);
seq_printf(filp, "CPT instruction requests %llu\n", reg);
reg = rvu_read64(rvu, blkaddr, CPT_AF_INST_LATENCY_PC);
@@ -2107,45 +2374,76 @@ static int rvu_dbg_cpt_pc_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(cpt_pc, cpt_pc_display, NULL);
-static void rvu_dbg_cpt_init(struct rvu *rvu)
+static void rvu_dbg_cpt_init(struct rvu *rvu, int blkaddr)
{
- if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ struct cpt_ctx *ctx;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
return;
- rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ if (blkaddr == BLKADDR_CPT0) {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt", rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[0];
+ ctx->blkaddr = BLKADDR_CPT0;
+ ctx->rvu = rvu;
+ } else {
+ rvu->rvu_dbg.cpt = debugfs_create_dir("cpt1",
+ rvu->rvu_dbg.root);
+ ctx = &rvu->rvu_dbg.cpt_ctx[1];
+ ctx->blkaddr = BLKADDR_CPT1;
+ ctx->rvu = rvu;
+ }
- debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_pc", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_pc_fops);
- debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_ae_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_ae_sts_fops);
- debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_se_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_se_sts_fops);
- debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_ie_sts", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_ie_sts_fops);
- debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_engines_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_engines_info_fops);
- debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_lfs_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_lfs_info_fops);
- debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, rvu,
+ debugfs_create_file("cpt_err_info", 0600, rvu->rvu_dbg.cpt, ctx,
&rvu_dbg_cpt_err_info_fops);
}
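+/* Name the debugfs root directory after the silicon family */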
+static const char *rvu_get_dbg_dir_name(struct rvu *rvu)
+{
+ if (!is_rvu_otx2(rvu))
+ return "cn10k";
+ else
+ return "octeontx2";
+}
+
void rvu_dbg_init(struct rvu *rvu)
{
- rvu->rvu_dbg.root = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ rvu->rvu_dbg.root = debugfs_create_dir(rvu_get_dbg_dir_name(rvu), NULL);
debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
&rvu_dbg_rsrc_status_fops);
- debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root, rvu,
- &rvu_dbg_rvu_pf_cgx_map_fops);
+ if (!cgx_get_cgxcnt_max())
+ goto create;
+
+ if (is_rvu_otx2(rvu))
+ debugfs_create_file("rvu_pf_cgx_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+ else
+ debugfs_create_file("rvu_pf_rpm_map", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_rvu_pf_cgx_map_fops);
+
+create:
rvu_dbg_npa_init(rvu);
rvu_dbg_nix_init(rvu, BLKADDR_NIX0);
rvu_dbg_nix_init(rvu, BLKADDR_NIX1);
rvu_dbg_cgx_init(rvu);
rvu_dbg_npc_init(rvu);
- rvu_dbg_cpt_init(rvu);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
+ rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bc0e4113370e..10a98bcb7c54 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -52,6 +52,650 @@ static bool rvu_common_request_irq(struct rvu *rvu, int offset,
return rvu->irq_allocated[offset];
}
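+/* Work handlers report NIX AF errors via devlink health outside IRQ context */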
+static void rvu_nix_intr_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
+ "NIX_AF_RVU Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+ nix_event_context->nix_af_rvu_int = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_gen_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
+ "NIX_AF_GEN Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
+ nix_event_context->nix_af_rvu_gen = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_err_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
+ "NIX_AF_ERR Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+ nix_event_context->nix_af_rvu_err = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_ras_work(struct work_struct *work)
+{
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
+
+ rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
+ devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
+ "NIX_AF_RAS Error",
+ rvu_nix_health_reporter->nix_event_ctx);
+}
+
+static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+ nix_event_context->nix_af_rvu_ras = intr;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+ queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
+
+ return IRQ_HANDLED;
+}
+
+static void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i, blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_GEN_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
+ "NIX_AF_GEN_INT",
+ rvu_nix_af_rvu_gen_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_rvu_err_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_rvu_ras_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return rc;
+}
+
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
+ enum nix_af_rvu_health health_reporter)
+{
+ struct rvu_nix_event_ctx *nix_event_context;
+ u64 intr_val;
+ int err;
+
+ nix_event_context = ctx;
+ switch (health_reporter) {
+ case NIX_AF_RVU_INTR:
+ intr_val = nix_event_context->nix_af_rvu_int;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
+ nix_event_context->nix_af_rvu_int);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_GEN:
+ intr_val = nix_event_context->nix_af_rvu_gen;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
+ nix_event_context->nix_af_rvu_gen);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_ERR:
+ intr_val = nix_event_context->nix_af_rvu_err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
+ nix_event_context->nix_af_rvu_err);
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(14)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(13)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(12)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(6)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(5)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ case NIX_AF_RVU_RAS:
+ intr_val = nix_event_context->nix_af_rvu_ras;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
+ nix_event_context->nix_af_rvu_ras);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
+ if (err)
+ return err;
+ if (intr_val & BIT_ULL(34)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(33)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(32)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(4)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(3)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
+
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(2)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(1)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
+ if (err)
+ return err;
+ }
+ if (intr_val & BIT_ULL(0)) {
+ err = devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
+ if (err)
+ return err;
+ }
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
+}
+
+static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_int)
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
+}
+
+static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_gen)
+ rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
+}
+
+static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_err)
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_ctx *nix_ctx;
+
+ nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
+
+ return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
+ rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
+}
+
+static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
+ void *ctx, struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+ struct rvu_nix_event_ctx *nix_event_ctx = ctx;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ if (nix_event_ctx->nix_af_rvu_ras)
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+}
+
+RVU_REPORTERS(hw_nix_intr);
+RVU_REPORTERS(hw_nix_gen);
+RVU_REPORTERS(hw_nix_err);
+RVU_REPORTERS(hw_nix_ras);
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
+
+static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *rvu_reporters;
+ struct rvu_nix_event_ctx *nix_event_context;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
+ if (!rvu_reporters)
+ return -ENOMEM;
+
+ rvu_dl->rvu_nix_health_reporter = rvu_reporters;
+ nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
+ if (!nix_event_context)
+ return -ENOMEM;
+
+ rvu_reporters->nix_event_ctx = nix_event_context;
+ rvu_reporters->rvu_hw_nix_intr_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_gen_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_err_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
+ }
+
+ rvu_reporters->rvu_hw_nix_ras_reporter =
+ devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
+ if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
+ dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
+ PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
+ return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
+ }
+
+ rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
+ if (!rvu_dl->devlink_wq)
+ goto err;
+
+ INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
+ INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
+ INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
+ INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);
+
+ return 0;
+err:
+ rvu_nix_health_reporters_destroy(rvu_dl);
+ return -ENOMEM;
+}
+
+static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct rvu *rvu = rvu_dl->rvu;
+ int err;
+
+ err = rvu_nix_register_reporters(rvu_dl);
+ if (err) {
+ dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
+ err);
+ return err;
+ }
+ rvu_nix_register_interrupts(rvu);
+
+ return 0;
+}
+
+static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ struct rvu_nix_health_reporters *nix_reporters;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_reporters = rvu_dl->rvu_nix_health_reporter;
+
+ if (!nix_reporters->rvu_hw_nix_ras_reporter)
+ return;
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);
+
+ if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
+ devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);
+
+ rvu_nix_unregister_interrupts(rvu);
+ kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
+ kfree(rvu_dl->rvu_nix_health_reporter);
+}
+
static void rvu_npa_intr_work(struct work_struct *work)
{
struct rvu_npa_health_reporters *rvu_npa_health_reporter;
@@ -698,9 +1342,14 @@ static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
static int rvu_health_reporters_create(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
+ int err;
rvu_dl = rvu->rvu_dl;
- return rvu_npa_health_reporters_create(rvu_dl);
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
}
static void rvu_health_reporters_destroy(struct rvu *rvu)
@@ -712,6 +1361,7 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_dl = rvu->rvu_dl;
rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
}
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
index d7578fa92ac1..471e57dedb20 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -41,11 +41,38 @@ struct rvu_npa_health_reporters {
struct work_struct ras_work;
};
+enum nix_af_rvu_health {
+ NIX_AF_RVU_INTR,
+ NIX_AF_RVU_GEN,
+ NIX_AF_RVU_ERR,
+ NIX_AF_RVU_RAS,
+};
+
+struct rvu_nix_event_ctx {
+ u64 nix_af_rvu_int;
+ u64 nix_af_rvu_gen;
+ u64 nix_af_rvu_err;
+ u64 nix_af_rvu_ras;
+};
+
+struct rvu_nix_health_reporters {
+ struct rvu_nix_event_ctx *nix_event_ctx;
+ struct devlink_health_reporter *rvu_hw_nix_intr_reporter;
+ struct work_struct intr_work;
+ struct devlink_health_reporter *rvu_hw_nix_gen_reporter;
+ struct work_struct gen_work;
+ struct devlink_health_reporter *rvu_hw_nix_err_reporter;
+ struct work_struct err_work;
+ struct devlink_health_reporter *rvu_hw_nix_ras_reporter;
+ struct work_struct ras_work;
+};
+
struct rvu_devlink {
struct devlink *dl;
struct rvu *rvu;
struct workqueue_struct *devlink_wq;
struct rvu_npa_health_reporters *rvu_npa_health_reporter;
+ struct rvu_nix_health_reporters *rvu_nix_health_reporter;
};
/* Devlink APIs */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index a8dfbb6d1774..d3000194e2d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -16,6 +16,7 @@
#include "rvu.h"
#include "npc.h"
#include "cgx.h"
+#include "lmac_common.h"
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
@@ -214,6 +215,7 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ struct mac_ops *mac_ops;
int pkind, pf, vf, lbkid;
u8 cgx_id, lmac_id;
int err;
@@ -233,17 +235,19 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
"PF_Func 0x%x: Invalid pkind\n", pcifunc);
return -EINVAL;
}
- pfvf->rx_chan_base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0);
+ pfvf->rx_chan_base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0);
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind);
rvu_npc_set_pkind(rvu, pkind, pfvf);
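+ /* Pause frame config goes through MAC-specific ops (CGX or RPM) */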
+ mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
/* By default we enable pause frames */
if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
- cgx_lmac_set_pause_frm(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true, true);
+ mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
+ rvu),
+ lmac_id, true, true);
break;
case NIX_INTF_TYPE_LBK:
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
@@ -262,10 +266,10 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
* loopback channels.Therefore if odd number of AF VFs are
* enabled then the last VF remains with no pair.
*/
- pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(lbkid, vf);
+ pfvf->rx_chan_base = rvu_nix_chan_lbk(rvu, lbkid, vf);
pfvf->tx_chan_base = vf & 0x1 ?
- NIX_CHAN_LBK_CHX(lbkid, vf - 1) :
- NIX_CHAN_LBK_CHX(lbkid, vf + 1);
+ rvu_nix_chan_lbk(rvu, lbkid, vf - 1) :
+ rvu_nix_chan_lbk(rvu, lbkid, vf + 1);
pfvf->rx_chan_cnt = 1;
pfvf->tx_chan_cnt = 1;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
@@ -1000,6 +1004,14 @@ int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu,
return rvu_nix_aq_enq_inst(rvu, req, rsp);
}
#endif
+/* CN10K mbox handler */
+int rvu_mbox_handler_nix_cn10k_aq_enq(struct rvu *rvu,
+ struct nix_cn10k_aq_enq_req *req,
+ struct nix_cn10k_aq_enq_rsp *rsp)
+{
+ return rvu_nix_aq_enq_inst(rvu, (struct nix_aq_enq_req *)req,
+ (struct nix_aq_enq_rsp *)rsp);
+}
int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu,
struct hwctx_disable_req *req,
@@ -2535,6 +2547,43 @@ static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
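+/* Max frame size depends on the LBK/RPM FIFO sizes, which are larger on CN10K */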
+static void rvu_get_lbk_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* CN10K supports LBK FIFO size 72 KB */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ *max_mtu = CN10K_LBK_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
+static void rvu_get_lmac_link_max_frs(struct rvu *rvu, u16 *max_mtu)
+{
+ /* RPM supports FIFO len 128 KB */
+ if (rvu_cgx_get_fifolen(rvu) == 0x20000)
+ *max_mtu = CN10K_LMAC_LINK_MAX_FRS;
+ else
+ *max_mtu = NIC_HW_MAX_FRS;
+}
+
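+/* Report min/max MTU supported for the requesting PF/VF's link type */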
+int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req,
+ struct nix_hw_info *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ int blkaddr;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &rsp->max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu);
+
+ rsp->min_mtu = NIC_HW_MIN_FRS;
+ return 0;
+}
+
int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -2580,6 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
struct nix_rx_flowkey_alg *field;
struct nix_rx_flowkey_alg tmp;
u32 key_type, valid_key;
+ int l4_key_offset = 0;
if (!alg)
return -EINVAL;
@@ -2712,6 +2762,12 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field_marker = false;
keyoff_marker = false;
}
+
+ /* TCP/UDP/SCTP and ESP/AH fall at the same offset, so
+ * remember the TCP key offset within the 40-byte hash key.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_TCP)
+ l4_key_offset = key_off;
break;
case NIX_FLOW_KEY_TYPE_NVGRE:
field->lid = NPC_LID_LD;
@@ -2783,11 +2839,31 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_mask = 0xF;
field->fn_mask = 1; /* Mask out the first nibble */
break;
+ case NIX_FLOW_KEY_TYPE_AH:
+ case NIX_FLOW_KEY_TYPE_ESP:
+ field->hdr_offset = 0;
+ field->bytesm1 = 7; /* SPI + sequence number */
+ field->ltype_mask = 0xF;
+ field->lid = NPC_LID_LE;
+ field->ltype_match = NPC_LT_LE_ESP;
+ if (key_type == NIX_FLOW_KEY_TYPE_AH) {
+ field->lid = NPC_LID_LD;
+ field->ltype_match = NPC_LT_LD_AH;
+ field->hdr_offset = 4;
+ keyoff_marker = false;
+ }
+ break;
}
field->ena = 1;
/* Found a valid flow key type */
if (valid_key) {
+ /* Use the key offset of TCP/UDP/SCTP fields
+ * for ESP/AH fields.
+ */
+ if (key_type == NIX_FLOW_KEY_TYPE_ESP ||
+ key_type == NIX_FLOW_KEY_TYPE_AH)
+ key_off = l4_key_offset;
field->key_offset = key_off;
memcpy(&alg[nr_field], field, sizeof(*field));
max_key_off = max(max_key_off, field->bytesm1 + 1);
@@ -3072,6 +3148,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
u64 cfg, lmac_fifo_len;
struct nix_hw *nix_hw;
u8 cgx = 0, lmac = 0;
+ u16 max_mtu;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
@@ -3081,7 +3158,12 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
if (!nix_hw)
return -EINVAL;
- if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS)
+ if (is_afvf(pcifunc))
+ rvu_get_lbk_link_max_frs(rvu, &max_mtu);
+ else
+ rvu_get_lmac_link_max_frs(rvu, &max_mtu);
+
+ if (!req->sdp_link && req->maxlen > max_mtu)
return NIX_AF_ERR_FRS_INVALID;
if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS)
@@ -3141,7 +3223,8 @@ linkcfg:
/* Update transmit credits for CGX links */
lmac_fifo_len =
- CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
+ rvu_cgx_get_fifolen(rvu) /
+ cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link));
cfg &= ~(0xFFFFFULL << 12);
cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12;
@@ -3181,23 +3264,40 @@ int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req,
return 0;
}
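+/* LBK link Tx credits are derived from the LBK FIFO size */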
+static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs)
+{
+ /* CN10k supports 72KB FIFO size and max packet size of 64k */
+ if (rvu->hw->lbk_bufsize == 0x12000)
+ return (rvu->hw->lbk_bufsize - lbk_max_frs) / 16;
+
+ return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */
+}
+
static void nix_link_config(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
int cgx, lmac_cnt, slink, link;
+ u16 lbk_max_frs, lmac_max_frs;
u64 tx_credits;
+ rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs);
+ rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs);
+
/* Set default min/max packet lengths allowed on NIX Rx links.
*
* With HW reset minlen value of 60byte, HW will treat ARP pkts
* as undersize and report them to SW as error pkts, hence
* setting it to 40 bytes.
*/
- for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) {
+ for (link = 0; link < hw->cgx_links; link++) {
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
- NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS);
+ ((u64)lmac_max_frs << 16) | NIC_HW_MIN_FRS);
}
+ for (link = hw->cgx_links; link < (hw->cgx_links + hw->lbk_links); link++) {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
+ ((u64)lbk_max_frs << 16) | NIC_HW_MIN_FRS);
+ }
if (hw->sdp_links) {
link = hw->cgx_links + hw->lbk_links;
rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link),
@@ -3209,7 +3309,8 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
*/
for (cgx = 0; cgx < hw->cgx; cgx++) {
lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu));
- tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16;
+ tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) -
+ lmac_max_frs) / 16;
/* Enable credits and set credit pkt count to max allowed */
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
slink = cgx * hw->lmac_per_cgx;
@@ -3223,7 +3324,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr)
/* Set Tx credits for LBK link */
slink = hw->cgx_links;
for (link = slink; link < (slink + hw->lbk_links); link++) {
- tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */
+ tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs);
/* Enable credits and set credit pkt count to max allowed */
tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1);
rvu_write64(rvu, blkaddr,
@@ -3354,14 +3455,6 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
- /* Set num of links of each type */
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
- hw->cgx = (cfg >> 12) & 0xF;
- hw->lmac_per_cgx = (cfg >> 8) & 0xF;
- hw->cgx_links = hw->cgx * hw->lmac_per_cgx;
- hw->lbk_links = (cfg >> 24) & 0xF;
- hw->sdp_links = 1;
-
/* Initialize admin queue */
err = nix_aq_init(rvu, block);
if (err)
@@ -3596,10 +3689,14 @@ static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
- int blkaddr;
+ int blkaddr, pf;
int nixlf;
u64 cfg;
+ pf = rvu_get_pf(pcifunc);
+ if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
+ return 0;
+
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return NIX_AF_ERR_AF_LF_INVALID;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 5cf9b7a907ae..04bb0803a5c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -102,9 +102,9 @@ int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel)
return -EINVAL;
} else {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- base = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0x0);
+ base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0);
/* CGX mapped functions has maximum of 16 channels */
- end = NIX_CHAN_CGX_LMAC_CHX(cgx_id, lmac_id, 0xF);
+ end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF);
}
if (channel < base || channel > end)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 14832b66d1fe..4ba9d54ce4e3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -26,6 +26,11 @@ static const char * const npc_flow_names[] = {
[NPC_DIP_IPV4] = "ipv4 destination ip",
[NPC_SIP_IPV6] = "ipv6 source ip",
[NPC_DIP_IPV6] = "ipv6 destination ip",
+ [NPC_IPPROTO_TCP] = "ip proto tcp",
+ [NPC_IPPROTO_UDP] = "ip proto udp",
+ [NPC_IPPROTO_SCTP] = "ip proto sctp",
+ [NPC_IPPROTO_AH] = "ip proto AH",
+ [NPC_IPPROTO_ESP] = "ip proto ESP",
[NPC_SPORT_TCP] = "tcp source port",
[NPC_DPORT_TCP] = "tcp destination port",
[NPC_SPORT_UDP] = "udp source port",
@@ -212,13 +217,13 @@ static bool npc_check_overlap(struct rvu *rvu, int blkaddr,
return false;
}
-static int npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
- u8 intf)
+static bool npc_check_field(struct rvu *rvu, int blkaddr, enum key_fields type,
+ u8 intf)
{
if (!npc_is_field_present(rvu, type, intf) ||
npc_check_overlap(rvu, blkaddr, type, 0, intf))
- return -EOPNOTSUPP;
- return 0;
+ return false;
+ return true;
}
static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
@@ -269,7 +274,7 @@ static void npc_scan_parse_result(struct npc_mcam *mcam, u8 bit_number,
break;
default:
return;
- };
+ }
npc_set_kw_masks(mcam, type, nr_bits, kwi, offset, intf);
}
@@ -448,14 +453,13 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
struct npc_mcam *mcam = &rvu->hw->mcam;
u64 *features = &mcam->rx_features;
u64 tcp_udp_sctp;
- int err, hdr;
+ int hdr;
if (is_npc_intf_tx(intf))
features = &mcam->tx_features;
for (hdr = NPC_DMAC; hdr < NPC_HEADER_FIELDS_MAX; hdr++) {
- err = npc_check_field(rvu, blkaddr, hdr, intf);
- if (!err)
+ if (npc_check_field(rvu, blkaddr, hdr, intf))
*features |= BIT_ULL(hdr);
}
@@ -464,13 +468,26 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP);
/* for tcp/udp/sctp corresponding layer type should be in the key */
- if (*features & tcp_udp_sctp)
- if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ if (*features & tcp_udp_sctp) {
+ if (!npc_check_field(rvu, blkaddr, NPC_LD, intf))
*features &= ~tcp_udp_sctp;
+ else
+ *features |= BIT_ULL(NPC_IPPROTO_TCP) |
+ BIT_ULL(NPC_IPPROTO_UDP) |
+ BIT_ULL(NPC_IPPROTO_SCTP);
+ }
+
+ /* for AH, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LD, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_AH);
+
+ /* for ESP, check if corresponding layer type is present in the key */
+ if (npc_check_field(rvu, blkaddr, NPC_LE, intf))
+ *features |= BIT_ULL(NPC_IPPROTO_ESP);
/* for vlan corresponding layer type should be in the key */
if (*features & BIT_ULL(NPC_OUTER_VID))
- if (npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
*features &= ~BIT_ULL(NPC_OUTER_VID);
}
@@ -743,13 +760,13 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
return;
/* For tcp/udp/sctp LTYPE should be present in entry */
- if (features & (BIT_ULL(NPC_SPORT_TCP) | BIT_ULL(NPC_DPORT_TCP)))
+ if (features & BIT_ULL(NPC_IPPROTO_TCP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_TCP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_UDP) | BIT_ULL(NPC_DPORT_UDP)))
+ if (features & BIT_ULL(NPC_IPPROTO_UDP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_UDP,
0, ~0ULL, 0, intf);
- if (features & (BIT_ULL(NPC_SPORT_SCTP) | BIT_ULL(NPC_DPORT_SCTP)))
+ if (features & BIT_ULL(NPC_IPPROTO_SCTP))
npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_SCTP,
0, ~0ULL, 0, intf);
@@ -758,6 +775,15 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry,
NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0,
NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf);
+ /* For AH, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_AH))
+ npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH,
+ 0, ~0ULL, 0, intf);
+ /* For ESP, LTYPE should be present in entry */
+ if (features & BIT_ULL(NPC_IPPROTO_ESP))
+ npc_update_entry(rvu, NPC_LE, entry, NPC_LT_LE_ESP,
+ 0, ~0ULL, 0, intf);
+
#define NPC_WRITE_FLOW(field, member, val_lo, val_hi, mask_lo, mask_hi) \
do { \
if (features & BIT_ULL((field))) { \
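
The pattern npc_set_features() and npc_update_flow() follow above is a plain 64-bit feature bitmap: one bit per key_fields value is set when the field (or its layer type) is extractable, and the same bit is tested later before the MCAM entry is programmed. A small self-contained sketch of that set/test pattern; the enum values and the "layer present" predicate are placeholders, not the driver's key_fields numbering:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

/* Placeholder field indices; the real enum key_fields lives in the driver */
enum { EX_IPPROTO_TCP, EX_IPPROTO_UDP, EX_IPPROTO_ESP, EX_MAX };

static bool layer_type_present(int field)
{
	/* Stand-in for npc_check_field(): pretend ESP's layer is missing */
	return field != EX_IPPROTO_ESP;
}

int main(void)
{
	uint64_t features = 0;

	for (int f = 0; f < EX_MAX; f++)
		if (layer_type_present(f))
			features |= BIT_ULL(f);

	if (features & BIT_ULL(EX_IPPROTO_TCP))
		printf("would program LD/LTYPE = TCP in the MCAM entry\n");
	if (!(features & BIT_ULL(EX_IPPROTO_ESP)))
		printf("ESP matching not offered on this interface\n");
	return 0;
}
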
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 0fb2aa909a23..3e401fd8ac63 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -44,6 +44,11 @@
#define RVU_AF_PFME_INT_W1S (0x28c8)
#define RVU_AF_PFME_INT_ENA_W1S (0x28d0)
#define RVU_AF_PFME_INT_ENA_W1C (0x28d8)
+#define RVU_AF_PFX_BAR4_ADDR(a) (0x5000 | (a) << 4)
+#define RVU_AF_PFX_BAR4_CFG(a) (0x5200 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_ADDR(a) (0x5400 | (a) << 4)
+#define RVU_AF_PFX_VF_BAR4_CFG(a) (0x5600 | (a) << 4)
+#define RVU_AF_PFX_LMTLINE_ADDR(a) (0x5800 | (a) << 4)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -100,6 +105,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -399,12 +406,16 @@
#define NIX_AF_RX_NPC_MIRROR_RCV (0x4720)
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
+#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
#define NIX_PRIV_LFX_INT_CFG (0x8000020)
#define NIX_AF_RVU_LF_CFG_DEBUG (0x8000030)
+#define NIX_AF_LINKX_BASE_MASK GENMASK_ULL(11, 0)
+#define NIX_AF_LINKX_RANGE_MASK GENMASK_ULL(19, 16)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
@@ -484,9 +495,17 @@
#define CPT_AF_RAS_INT_ENA_W1S (0x47030)
#define CPT_AF_RAS_INT_ENA_W1C (0x47038)
+#define AF_BAR2_ALIASX(a, b) (0x9100000ull | (a) << 12 | (b))
+#define CPT_AF_BAR2_SEL 0x9000000
+#define CPT_AF_BAR2_ALIASX(a, b) AF_BAR2_ALIASX(a, b)
+
#define CPT_AF_LF_CTL2_SHIFT 3
#define CPT_AF_LF_SSO_PF_FUNC_SHIFT 32
+#define CPT_LF_CTL 0x10
+#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_GRP_PTR 0x120
+
#define NPC_AF_BLK_RST (0x00040)
/* NPC */
@@ -629,4 +648,17 @@
(0x00F00 | (a) << 5 | (b) << 4)
#define NDC_AF_BANKX_HIT_PC(a) (0x01000 | (a) << 3)
#define NDC_AF_BANKX_MISS_PC(a) (0x01100 | (a) << 3)
+
+/* LBK */
+#define LBK_CONST (0x10ull)
+#define LBK_LINK_CFG_P2X (0x400ull)
+#define LBK_LINK_CFG_X2P (0x408ull)
+#define LBK_CONST_CHANS GENMASK_ULL(47, 32)
+#define LBK_CONST_DST GENMASK_ULL(31, 28)
+#define LBK_CONST_SRC GENMASK_ULL(27, 24)
+#define LBK_CONST_BUF_SIZE GENMASK_ULL(23, 0)
+#define LBK_LINK_CFG_RANGE_MASK GENMASK_ULL(19, 16)
+#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
+#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+
#endif /* RVU_REG_H */
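
The new LBK_* and NIX_AF_LINKX_* definitions are GENMASK_ULL() bit ranges meant to be pulled out of 64-bit CSR values (for example LBK_CONST_CHANS is bits 47:32 of LBK_CONST). A standalone sketch of that extraction with a portable FIELD_GET-style helper; the register value used is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define LBK_CONST_CHANS		GENMASK_ULL(47, 32)
#define LBK_CONST_BUF_SIZE	GENMASK_ULL(23, 0)

/* Minimal FIELD_GET(): shift the value down by the mask's lowest set bit */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) >> __builtin_ctzll(mask);
}

int main(void)
{
	uint64_t lbk_const = 0x0000004000010000ULL;	/* made-up CSR value */

	printf("channels: %llu\n",
	       (unsigned long long)field_get(LBK_CONST_CHANS, lbk_const));
	printf("buf size: %llu\n",
	       (unsigned long long)field_get(LBK_CONST_BUF_SIZE, lbk_const));
	return 0;
}
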
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index e2153d47c373..5e5f45c7eab0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -74,6 +74,16 @@ enum npa_af_int_vec_e {
NPA_AF_INT_VEC_CNT = 0x5,
};
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
@@ -129,63 +139,29 @@ enum npa_inpq {
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_17_23 : 7;
- u64 lf : 9;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 lf : 9;
u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NPA admin queue result structure */
struct npa_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
- u64 op : 4;
+ u64 op : 4; /* W0 */
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
struct npa_aura_s {
u64 pool_addr; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 avg_level : 8;
- u64 reserved_118_119 : 2;
- u64 shift : 6;
- u64 aura_drop : 8;
- u64 reserved_98_103 : 6;
- u64 bp_ena : 2;
- u64 aura_drop_ena : 1;
- u64 pool_drop_ena : 1;
- u64 reserved_93 : 1;
- u64 avg_con : 9;
- u64 pool_way_mask : 16;
- u64 pool_caching : 1;
- u64 reserved_65 : 2;
- u64 ena : 1;
-#else
- u64 ena : 1;
+ u64 ena : 1; /* W1 */
u64 reserved_65 : 2;
u64 pool_caching : 1;
u64 pool_way_mask : 16;
@@ -199,59 +175,24 @@ struct npa_aura_s {
u64 shift : 6;
u64 reserved_118_119 : 2;
u64 avg_level : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 reserved_189_191 : 3;
- u64 nix1_bpid : 9;
- u64 reserved_177_179 : 3;
- u64 nix0_bpid : 9;
- u64 reserved_164_167 : 4;
- u64 count : 36;
-#else
- u64 count : 36;
+ u64 count : 36; /* W2 */
u64 reserved_164_167 : 4;
u64 nix0_bpid : 9;
u64 reserved_177_179 : 3;
u64 nix1_bpid : 9;
u64 reserved_189_191 : 3;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_252_255 : 4;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_up_crossing : 1;
- u64 fc_ena : 1;
- u64 reserved_240_243 : 4;
- u64 bp : 8;
- u64 reserved_228_231 : 4;
- u64 limit : 36;
-#else
- u64 limit : 36;
+ u64 limit : 36; /* W3 */
u64 reserved_228_231 : 4;
u64 bp : 8;
- u64 reserved_240_243 : 4;
+ u64 reserved_241_243 : 3;
+ u64 fc_be : 1;
u64 fc_ena : 1;
u64 fc_up_crossing : 1;
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 reserved_252_255 : 4;
-#endif
u64 fc_addr; /* W4 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 reserved_379_383 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_371 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_363 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 update_time : 16;
- u64 pool_drop : 8;
-#else
- u64 pool_drop : 8;
+ u64 pool_drop : 8; /* W5 */
u64 update_time : 16;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -263,31 +204,15 @@ struct npa_aura_s {
u64 reserved_371 : 1;
u64 err_qint_idx : 7;
u64 reserved_379_383 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 reserved_420_447 : 28;
- u64 thresh : 36;
-#else
- u64 thresh : 36;
- u64 reserved_420_447 : 28;
-#endif
+ u64 thresh : 36; /* W6 */
+ u64 rsvd_423_420 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_435_447 : 13;
u64 reserved_448_511; /* W7 */
};
struct npa_pool_s {
u64 stack_base; /* W0 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 reserved_115_127 : 13;
- u64 buf_size : 11;
- u64 reserved_100_103 : 4;
- u64 buf_offset : 12;
- u64 stack_way_mask : 16;
- u64 reserved_70_71 : 3;
- u64 stack_caching : 1;
- u64 reserved_66_67 : 2;
- u64 nat_align : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 nat_align : 1;
u64 reserved_66_67 : 2;
@@ -298,36 +223,10 @@ struct npa_pool_s {
u64 reserved_100_103 : 4;
u64 buf_size : 11;
u64 reserved_115_127 : 13;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 stack_pages : 32;
- u64 stack_max_pages : 32;
-#else
u64 stack_max_pages : 32;
u64 stack_pages : 32;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 reserved_240_255 : 16;
- u64 op_pc : 48;
-#else
u64 op_pc : 48;
u64 reserved_240_255 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 reserved_316_319 : 4;
- u64 update_time : 16;
- u64 reserved_297_299 : 3;
- u64 fc_up_crossing : 1;
- u64 fc_hyst_bits : 4;
- u64 fc_stype : 2;
- u64 fc_ena : 1;
- u64 avg_con : 9;
- u64 avg_level : 8;
- u64 reserved_270_271 : 2;
- u64 shift : 6;
- u64 reserved_260_263 : 4;
- u64 stack_offset : 4;
-#else
u64 stack_offset : 4;
u64 reserved_260_263 : 4;
u64 shift : 6;
@@ -338,26 +237,13 @@ struct npa_pool_s {
u64 fc_stype : 2;
u64 fc_hyst_bits : 4;
u64 fc_up_crossing : 1;
- u64 reserved_297_299 : 3;
+ u64 fc_be : 1;
+ u64 reserved_298_299 : 2;
u64 update_time : 16;
u64 reserved_316_319 : 4;
-#endif
u64 fc_addr; /* W5 */
u64 ptr_start; /* W6 */
u64 ptr_end; /* W7 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
- u64 reserved_571_575 : 5;
- u64 err_qint_idx : 7;
- u64 reserved_563 : 1;
- u64 thresh_qint_idx : 7;
- u64 reserved_555 : 1;
- u64 thresh_up : 1;
- u64 thresh_int_ena : 1;
- u64 thresh_int : 1;
- u64 err_int_ena : 8;
- u64 err_int : 8;
- u64 reserved_512_535 : 24;
-#else
u64 reserved_512_535 : 24;
u64 err_int : 8;
u64 err_int_ena : 8;
@@ -369,14 +255,10 @@ struct npa_pool_s {
u64 reserved_563 : 1;
u64 err_qint_idx : 7;
u64 reserved_571_575 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 reserved_612_639 : 28;
u64 thresh : 36;
-#else
- u64 thresh : 36;
- u64 reserved_612_639 : 28;
-#endif
+ u64 rsvd_615_612 : 4;
+ u64 fc_msh_dst : 11;
+ u64 reserved_627_639 : 13;
u64 reserved_640_703; /* W10 */
u64 reserved_704_767; /* W11 */
u64 reserved_768_831; /* W12 */
@@ -404,6 +286,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
+ NIX_AQ_CTYPE_BAND_PROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -418,59 +301,29 @@ enum nix_aq_instop {
/* NIX admin queue instruction structure */
struct nix_aq_inst_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 doneint : 1; /* W0 */
- u64 reserved_44_62 : 19;
- u64 cindex : 20;
- u64 reserved_15_23 : 9;
- u64 lf : 7;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
- u64 lf : 7;
- u64 reserved_15_23 : 9;
+ u64 lf : 9;
+ u64 reserved_17_23 : 7;
u64 cindex : 20;
u64 reserved_44_62 : 19;
u64 doneint : 1;
-#endif
u64 res_addr; /* W1 */
};
/* NIX admin queue result structure */
struct nix_aq_res_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_17_63 : 47; /* W0 */
- u64 doneint : 1;
- u64 compcode : 8;
- u64 ctype : 4;
- u64 op : 4;
-#else
u64 op : 4;
u64 ctype : 4;
u64 compcode : 8;
u64 doneint : 1;
u64 reserved_17_63 : 47;
-#endif
u64 reserved_64_127; /* W1 */
};
/* NIX Completion queue context structure */
struct nix_cq_ctx_s {
u64 base;
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 wrptr : 20;
- u64 avg_con : 9;
- u64 cint_idx : 7;
- u64 cq_err : 1;
- u64 qint_idx : 7;
- u64 rsvd_81_83 : 3;
- u64 bpid : 9;
- u64 rsvd_69_71 : 3;
- u64 bp_ena : 1;
- u64 rsvd_64_67 : 4;
-#else
u64 rsvd_64_67 : 4;
u64 bp_ena : 1;
u64 rsvd_69_71 : 3;
@@ -481,31 +334,10 @@ struct nix_cq_ctx_s {
u64 cint_idx : 7;
u64 avg_con : 9;
u64 wrptr : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 update_time : 16;
- u64 avg_level : 8;
- u64 head : 20;
- u64 tail : 20;
-#else
u64 tail : 20;
u64 head : 20;
u64 avg_level : 8;
u64 update_time : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 cq_err_int_ena : 8;
- u64 cq_err_int : 8;
- u64 qsize : 4;
- u64 rsvd_233_235 : 3;
- u64 caching : 1;
- u64 substream : 20;
- u64 rsvd_210_211 : 2;
- u64 ena : 1;
- u64 drop_ena : 1;
- u64 drop : 8;
- u64 bp : 8;
-#else
u64 bp : 8;
u64 drop : 8;
u64 drop_ena : 1;
@@ -517,20 +349,161 @@ struct nix_cq_ctx_s {
u64 qsize : 4;
u64 cq_err_int : 8;
u64 cq_err_int_ena : 8;
-#endif
+};
+
+/* CN10K NIX Receive queue context structure */
+struct nix_cn10k_rq_ctx_s {
+ u64 ena : 1;
+ u64 sso_ena : 1;
+ u64 ipsech_ena : 1;
+ u64 ena_wqwd : 1;
+ u64 cq : 20;
+ u64 rsvd_36_24 : 13;
+ u64 lenerr_dis : 1;
+ u64 csum_il4_dis : 1;
+ u64 csum_ol4_dis : 1;
+ u64 len_il4_dis : 1;
+ u64 len_il3_dis : 1;
+ u64 len_ol4_dis : 1;
+ u64 len_ol3_dis : 1;
+ u64 wqe_aura : 20;
+ u64 spb_aura : 20;
+ u64 lpb_aura : 20;
+ u64 sso_grp : 10;
+ u64 sso_tt : 2;
+ u64 pb_caching : 2;
+ u64 wqe_caching : 1;
+ u64 xqe_drop_ena : 1;
+ u64 spb_drop_ena : 1;
+ u64 lpb_drop_ena : 1;
+ u64 pb_stashing : 1;
+ u64 ipsecd_drop_ena : 1;
+ u64 chi_ena : 1;
+ u64 rsvd_127_125 : 3;
+ u64 band_prof_id : 10; /* W2 */
+ u64 rsvd_138 : 1;
+ u64 policer_ena : 1;
+ u64 spb_sizem1 : 6;
+ u64 wqe_skip : 2;
+ u64 rsvd_150_148 : 3;
+ u64 spb_ena : 1;
+ u64 lpb_sizem1 : 12;
+ u64 first_skip : 7;
+ u64 rsvd_171 : 1;
+ u64 later_skip : 6;
+ u64 xqe_imm_size : 6;
+ u64 rsvd_189_184 : 6;
+ u64 xqe_imm_copy : 1;
+ u64 xqe_hdr_split : 1;
+ u64 xqe_drop : 8; /* W3 */
+ u64 xqe_pass : 8;
+ u64 wqe_pool_drop : 8;
+ u64 wqe_pool_pass : 8;
+ u64 spb_aura_drop : 8;
+ u64 spb_aura_pass : 8;
+ u64 spb_pool_drop : 8;
+ u64 spb_pool_pass : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
+ u64 lpb_aura_pass : 8;
+ u64 lpb_pool_drop : 8;
+ u64 lpb_pool_pass : 8;
+ u64 rsvd_291_288 : 4;
+ u64 rq_int : 8;
+ u64 rq_int_ena : 8;
+ u64 qint_idx : 7;
+ u64 rsvd_319_315 : 5;
+ u64 ltag : 24; /* W5 */
+ u64 good_utag : 8;
+ u64 bad_utag : 8;
+ u64 flow_tagw : 6;
+ u64 ipsec_vwqe : 1;
+ u64 vwqe_ena : 1;
+ u64 vwqe_wait : 8;
+ u64 max_vsize_exp : 4;
+ u64 vwqe_skip : 2;
+ u64 rsvd_383_382 : 2;
+ u64 octs : 48; /* W6 */
+ u64 rsvd_447_432 : 16;
+ u64 pkts : 48; /* W7 */
+ u64 rsvd_511_496 : 16;
+ u64 drop_octs : 48; /* W8 */
+ u64 rsvd_575_560 : 16;
+ u64 drop_pkts : 48; /* W9 */
+ u64 rsvd_639_624 : 16;
+ u64 re_pkts : 48; /* W10 */
+ u64 rsvd_703_688 : 16;
+ u64 rsvd_767_704; /* W11 */
+ u64 rsvd_831_768; /* W12 */
+ u64 rsvd_895_832; /* W13 */
+ u64 rsvd_959_896; /* W14 */
+ u64 rsvd_1023_960; /* W15 */
+};
+
+/* CN10K NIX Send queue context structure */
+struct nix_cn10k_sq_ctx_s {
+ u64 ena : 1;
+ u64 qint_idx : 6;
+ u64 substream : 20;
+ u64 sdp_mcast : 1;
+ u64 cq : 20;
+ u64 sqe_way_mask : 16;
+ u64 smq : 10; /* W1 */
+ u64 cq_ena : 1;
+ u64 xoff : 1;
+ u64 sso_ena : 1;
+ u64 smq_rr_weight : 14;
+ u64 default_chan : 12;
+ u64 sqb_count : 16;
+ u64 rsvd_120_119 : 2;
+ u64 smq_rr_count_lb : 7;
+ u64 smq_rr_count_ub : 25; /* W2 */
+ u64 sqb_aura : 20;
+ u64 sq_int : 8;
+ u64 sq_int_ena : 8;
+ u64 sqe_stype : 2;
+ u64 rsvd_191 : 1;
+ u64 max_sqe_size : 2; /* W3 */
+ u64 cq_limit : 8;
+ u64 lmt_dis : 1;
+ u64 mnq_dis : 1;
+ u64 smq_next_sq : 20;
+ u64 smq_lso_segnum : 8;
+ u64 tail_offset : 6;
+ u64 smenq_offset : 6;
+ u64 head_offset : 6;
+ u64 smenq_next_sqb_vld : 1;
+ u64 smq_pend : 1;
+ u64 smq_next_sq_vld : 1;
+ u64 rsvd_255_253 : 3;
+ u64 next_sqb : 64; /* W4 */
+ u64 tail_sqb : 64; /* W5 */
+ u64 smenq_sqb : 64; /* W6 */
+ u64 smenq_next_sqb : 64; /* W7 */
+ u64 head_sqb : 64; /* W8 */
+ u64 rsvd_583_576 : 8; /* W9 */
+ u64 vfi_lso_total : 18;
+ u64 vfi_lso_sizem1 : 3;
+ u64 vfi_lso_sb : 8;
+ u64 vfi_lso_mps : 14;
+ u64 vfi_lso_vlan0_ins_ena : 1;
+ u64 vfi_lso_vlan1_ins_ena : 1;
+ u64 vfi_lso_vld : 1;
+ u64 rsvd_639_630 : 10;
+ u64 scm_lso_rem : 18; /* W10 */
+ u64 rsvd_703_658 : 46;
+ u64 octs : 48; /* W11 */
+ u64 rsvd_767_752 : 16;
+ u64 pkts : 48; /* W12 */
+ u64 rsvd_831_816 : 16;
+ u64 rsvd_895_832 : 64; /* W13 */
+ u64 dropped_octs : 48;
+ u64 rsvd_959_944 : 16;
+ u64 dropped_pkts : 48;
+ u64 rsvd_1023_1008 : 16;
};
/* NIX Receive queue context structure */
struct nix_rq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 wqe_aura : 20;
- u64 substream : 20;
- u64 cq : 20;
- u64 ena_wqwd : 1;
- u64 ipsech_ena : 1;
- u64 sso_ena : 1;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 sso_ena : 1;
u64 ipsech_ena : 1;
@@ -538,19 +511,6 @@ struct nix_rq_ctx_s {
u64 cq : 20;
u64 substream : 20;
u64 wqe_aura : 20;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 rsvd_127_122 : 6;
- u64 lpb_drop_ena : 1;
- u64 spb_drop_ena : 1;
- u64 xqe_drop_ena : 1;
- u64 wqe_caching : 1;
- u64 pb_caching : 2;
- u64 sso_tt : 2;
- u64 sso_grp : 10;
- u64 lpb_aura : 20;
- u64 spb_aura : 20;
-#else
u64 spb_aura : 20;
u64 lpb_aura : 20;
u64 sso_grp : 10;
@@ -561,23 +521,7 @@ struct nix_rq_ctx_s {
u64 spb_drop_ena : 1;
u64 lpb_drop_ena : 1;
u64 rsvd_127_122 : 6;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 xqe_hdr_split : 1;
- u64 xqe_imm_copy : 1;
- u64 rsvd_189_184 : 6;
- u64 xqe_imm_size : 6;
- u64 later_skip : 6;
- u64 rsvd_171 : 1;
- u64 first_skip : 7;
- u64 lpb_sizem1 : 12;
- u64 spb_ena : 1;
- u64 rsvd_150_148 : 3;
- u64 wqe_skip : 2;
- u64 spb_sizem1 : 6;
- u64 rsvd_139_128 : 12;
-#else
- u64 rsvd_139_128 : 12;
+ u64 rsvd_139_128 : 12; /* W2 */
u64 spb_sizem1 : 6;
u64 wqe_skip : 2;
u64 rsvd_150_148 : 3;
@@ -590,18 +534,7 @@ struct nix_rq_ctx_s {
u64 rsvd_189_184 : 6;
u64 xqe_imm_copy : 1;
u64 xqe_hdr_split : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 spb_pool_pass : 8;
- u64 spb_pool_drop : 8;
- u64 spb_aura_pass : 8;
- u64 spb_aura_drop : 8;
- u64 wqe_pool_pass : 8;
- u64 wqe_pool_drop : 8;
- u64 xqe_pass : 8;
- u64 xqe_drop : 8;
-#else
- u64 xqe_drop : 8;
+ u64 xqe_drop : 8; /* W3 */
u64 xqe_pass : 8;
u64 wqe_pool_drop : 8;
u64 wqe_pool_pass : 8;
@@ -609,19 +542,7 @@ struct nix_rq_ctx_s {
u64 spb_aura_pass : 8;
u64 spb_pool_drop : 8;
u64 spb_pool_pass : 8;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W4 */
- u64 rsvd_319_315 : 5;
- u64 qint_idx : 7;
- u64 rq_int_ena : 8;
- u64 rq_int : 8;
- u64 rsvd_291_288 : 4;
- u64 lpb_pool_pass : 8;
- u64 lpb_pool_drop : 8;
- u64 lpb_aura_pass : 8;
- u64 lpb_aura_drop : 8;
-#else
- u64 lpb_aura_drop : 8;
+ u64 lpb_aura_drop : 8; /* W4 */
u64 lpb_aura_pass : 8;
u64 lpb_pool_drop : 8;
u64 lpb_pool_pass : 8;
@@ -630,55 +551,21 @@ struct nix_rq_ctx_s {
u64 rq_int_ena : 8;
u64 qint_idx : 7;
u64 rsvd_319_315 : 5;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W5 */
- u64 rsvd_383_366 : 18;
- u64 flow_tagw : 6;
- u64 bad_utag : 8;
- u64 good_utag : 8;
- u64 ltag : 24;
-#else
- u64 ltag : 24;
+ u64 ltag : 24; /* W5 */
u64 good_utag : 8;
u64 bad_utag : 8;
u64 flow_tagw : 6;
u64 rsvd_383_366 : 18;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W6 */
- u64 rsvd_447_432 : 16;
- u64 octs : 48;
-#else
- u64 octs : 48;
+ u64 octs : 48; /* W6 */
u64 rsvd_447_432 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W7 */
- u64 rsvd_511_496 : 16;
- u64 pkts : 48;
-#else
- u64 pkts : 48;
+ u64 pkts : 48; /* W7 */
u64 rsvd_511_496 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W8 */
+ u64 drop_octs : 48; /* W8 */
u64 rsvd_575_560 : 16;
- u64 drop_octs : 48;
-#else
- u64 drop_octs : 48;
- u64 rsvd_575_560 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_624 : 16;
- u64 drop_pkts : 48;
-#else
- u64 drop_pkts : 48;
+ u64 drop_pkts : 48; /* W9 */
u64 rsvd_639_624 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
+ u64 re_pkts : 48; /* W10 */
u64 rsvd_703_688 : 16;
- u64 re_pkts : 48;
-#else
- u64 re_pkts : 48;
- u64 rsvd_703_688 : 16;
-#endif
u64 rsvd_767_704; /* W11 */
u64 rsvd_831_768; /* W12 */
u64 rsvd_895_832; /* W13 */
@@ -701,30 +588,12 @@ enum nix_stype {
/* NIX Send queue context structure */
struct nix_sq_ctx_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- u64 sqe_way_mask : 16;
- u64 cq : 20;
- u64 sdp_mcast : 1;
- u64 substream : 20;
- u64 qint_idx : 6;
- u64 ena : 1;
-#else
u64 ena : 1;
u64 qint_idx : 6;
u64 substream : 20;
u64 sdp_mcast : 1;
u64 cq : 20;
u64 sqe_way_mask : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W1 */
- u64 sqb_count : 16;
- u64 default_chan : 12;
- u64 smq_rr_quantum : 24;
- u64 sso_ena : 1;
- u64 xoff : 1;
- u64 cq_ena : 1;
- u64 smq : 9;
-#else
u64 smq : 9;
u64 cq_ena : 1;
u64 xoff : 1;
@@ -732,37 +601,12 @@ struct nix_sq_ctx_s {
u64 smq_rr_quantum : 24;
u64 default_chan : 12;
u64 sqb_count : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W2 */
- u64 rsvd_191 : 1;
- u64 sqe_stype : 2;
- u64 sq_int_ena : 8;
- u64 sq_int : 8;
- u64 sqb_aura : 20;
- u64 smq_rr_count : 25;
-#else
u64 smq_rr_count : 25;
u64 sqb_aura : 20;
u64 sq_int : 8;
u64 sq_int_ena : 8;
u64 sqe_stype : 2;
u64 rsvd_191 : 1;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W3 */
- u64 rsvd_255_253 : 3;
- u64 smq_next_sq_vld : 1;
- u64 smq_pend : 1;
- u64 smenq_next_sqb_vld : 1;
- u64 head_offset : 6;
- u64 smenq_offset : 6;
- u64 tail_offset : 6;
- u64 smq_lso_segnum : 8;
- u64 smq_next_sq : 20;
- u64 mnq_dis : 1;
- u64 lmt_dis : 1;
- u64 cq_limit : 8;
- u64 max_sqe_size : 2;
-#else
u64 max_sqe_size : 2;
u64 cq_limit : 8;
u64 lmt_dis : 1;
@@ -776,23 +620,11 @@ struct nix_sq_ctx_s {
u64 smq_pend : 1;
u64 smq_next_sq_vld : 1;
u64 rsvd_255_253 : 3;
-#endif
u64 next_sqb : 64;/* W4 */
u64 tail_sqb : 64;/* W5 */
u64 smenq_sqb : 64;/* W6 */
u64 smenq_next_sqb : 64;/* W7 */
u64 head_sqb : 64;/* W8 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W9 */
- u64 rsvd_639_630 : 10;
- u64 vfi_lso_vld : 1;
- u64 vfi_lso_vlan1_ins_ena : 1;
- u64 vfi_lso_vlan0_ins_ena : 1;
- u64 vfi_lso_mps : 14;
- u64 vfi_lso_sb : 8;
- u64 vfi_lso_sizem1 : 3;
- u64 vfi_lso_total : 18;
- u64 rsvd_583_576 : 8;
-#else
u64 rsvd_583_576 : 8;
u64 vfi_lso_total : 18;
u64 vfi_lso_sizem1 : 3;
@@ -802,68 +634,28 @@ struct nix_sq_ctx_s {
u64 vfi_lso_vlan1_ins_ena : 1;
u64 vfi_lso_vld : 1;
u64 rsvd_639_630 : 10;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W10 */
- u64 rsvd_703_658 : 46;
- u64 scm_lso_rem : 18;
-#else
u64 scm_lso_rem : 18;
u64 rsvd_703_658 : 46;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W11 */
- u64 rsvd_767_752 : 16;
- u64 octs : 48;
-#else
u64 octs : 48;
u64 rsvd_767_752 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W12 */
- u64 rsvd_831_816 : 16;
- u64 pkts : 48;
-#else
u64 pkts : 48;
u64 rsvd_831_816 : 16;
-#endif
u64 rsvd_895_832 : 64;/* W13 */
-#if defined(__BIG_ENDIAN_BITFIELD) /* W14 */
- u64 rsvd_959_944 : 16;
- u64 dropped_octs : 48;
-#else
u64 dropped_octs : 48;
u64 rsvd_959_944 : 16;
-#endif
-#if defined(__BIG_ENDIAN_BITFIELD) /* W15 */
- u64 rsvd_1023_1008 : 16;
- u64 dropped_pkts : 48;
-#else
u64 dropped_pkts : 48;
u64 rsvd_1023_1008 : 16;
-#endif
};
/* NIX Receive side scaling entry structure*/
struct nix_rsse_s {
-#if defined(__BIG_ENDIAN_BITFIELD)
- uint32_t reserved_20_31 : 12;
- uint32_t rq : 20;
-#else
uint32_t rq : 20;
uint32_t reserved_20_31 : 12;
-#endif
};
/* NIX receive multicast/mirror entry structure */
struct nix_rx_mce_s {
-#if defined(__BIG_ENDIAN_BITFIELD) /* W0 */
- uint64_t next : 16;
- uint64_t pf_func : 16;
- uint64_t rsvd_31_24 : 8;
- uint64_t index : 20;
- uint64_t eol : 1;
- uint64_t rsvd_2 : 1;
- uint64_t op : 2;
-#else
uint64_t op : 2;
uint64_t rsvd_2 : 1;
uint64_t eol : 1;
@@ -871,7 +663,6 @@ struct nix_rx_mce_s {
uint64_t rsvd_31_24 : 8;
uint64_t pf_func : 16;
uint64_t next : 16;
-#endif
};
enum nix_lsoalg {
@@ -890,15 +681,6 @@ enum nix_txlayer {
};
struct nix_lso_format {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 rsvd_19_63 : 45;
- u64 alg : 3;
- u64 rsvd_14_15 : 2;
- u64 sizem1 : 2;
- u64 rsvd_10_11 : 2;
- u64 layer : 2;
- u64 offset : 8;
-#else
u64 offset : 8;
u64 layer : 2;
u64 rsvd_10_11 : 2;
@@ -906,24 +688,9 @@ struct nix_lso_format {
u64 rsvd_14_15 : 2;
u64 alg : 3;
u64 rsvd_19_63 : 45;
-#endif
};
struct nix_rx_flowkey_alg {
-#if defined(__BIG_ENDIAN_BITFIELD)
- u64 reserved_35_63 :29;
- u64 ltype_match :4;
- u64 ltype_mask :4;
- u64 sel_chan :1;
- u64 ena :1;
- u64 reserved_24_24 :1;
- u64 lid :3;
- u64 bytesm1 :5;
- u64 hdr_offset :8;
- u64 fn_mask :1;
- u64 ln_mask :1;
- u64 key_offset :6;
-#else
u64 key_offset :6;
u64 ln_mask :1;
u64 fn_mask :1;
@@ -936,7 +703,6 @@ struct nix_rx_flowkey_alg {
u64 ltype_mask :4;
u64 ltype_match :4;
u64 reserved_35_63 :29;
-#endif
};
/* NIX VTAG size */
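
With the __BIG_ENDIAN_BITFIELD variants dropped, each context structure above is a single little-endian bitfield layout whose /* Wn */ comments count 64-bit words, so the overall size is fixed by construction. A compile-time check of that invariant for the two-word nix_aq_res_s layout, rewritten standalone (GCC/Clang bitfield packing of u64 members assumed):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Same field widths as nix_aq_res_s above: W0 packs to 64 bits, W1 is reserved */
struct example_aq_res {
	u64 op		: 4;
	u64 ctype	: 4;
	u64 compcode	: 8;
	u64 doneint	: 1;
	u64 reserved_17_63 : 47;
	u64 reserved_64_127;		/* W1 */
};

_Static_assert(sizeof(struct example_aq_res) == 16,
	       "result structure must stay exactly two 64-bit words");

int main(void)
{
	struct example_aq_res res = { .op = 0x1 };

	printf("sizeof = %zu, op = %llu\n", sizeof(res),
	       (unsigned long long)res.op);
	return 0;
}
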
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 4193ae3bde6b..745aa8a19499 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -3,11 +3,11 @@
# Makefile for Marvell's OcteonTX2 ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o
-octeontx2_nicvf-y := otx2_vf.o
+rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_ptp.o otx2_flows.o cn10k.o
+rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
new file mode 100644
index 000000000000..9ec0313f13fc
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#include "cn10k.h"
+#include "otx2_reg.h"
+#include "otx2_struct.h"
+
+static struct dev_hw_ops otx2_hw_ops = {
+ .sq_aq_init = otx2_sq_aq_init,
+ .sqe_flush = otx2_sqe_flush,
+ .aura_freeptr = otx2_aura_freeptr,
+ .refill_pool_ptrs = otx2_refill_pool_ptrs,
+};
+
+static struct dev_hw_ops cn10k_hw_ops = {
+ .sq_aq_init = cn10k_sq_aq_init,
+ .sqe_flush = cn10k_sqe_flush,
+ .aura_freeptr = cn10k_aura_freeptr,
+ .refill_pool_ptrs = cn10k_refill_pool_ptrs,
+};
+
+int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+{
+ int size, num_lines;
+ u64 base;
+
+ if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
+ pf->hw_ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ pf->hw_ops = &cn10k_hw_ops;
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ (MBOX_SIZE * (pf->total_vfs + 1));
+
+ size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
+ (MBOX_SIZE * (pf->total_vfs + 1));
+
+ pf->hw.lmt_base = ioremap(base, size);
+
+ if (!pf->hw.lmt_base) {
+ dev_err(pf->dev, "Unable to map PF LMTST region\n");
+ return -ENOMEM;
+ }
+
+ /* FIXME: Get the num of LMTST lines from LMT table */
+ pf->tot_lmt_lines = size / LMT_LINE_SIZE;
+ num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
+ pf->hw.tx_queues;
+ /* Number of LMT lines per SQ */
+ pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
+
+ pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
+ return 0;
+}
+
+int cn10k_vf_lmtst_init(struct otx2_nic *vf)
+{
+ int size, num_lines;
+
+ if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
+ vf->hw_ops = &otx2_hw_ops;
+ return 0;
+ }
+
+ vf->hw_ops = &cn10k_hw_ops;
+ size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
+ vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ size);
+ if (!vf->hw.lmt_base) {
+ dev_err(vf->dev, "Unable to map VF LMTST region\n");
+ return -ENOMEM;
+ }
+
+ vf->tot_lmt_lines = size / LMT_LINE_SIZE;
+ /* LMTST lines per SQ */
+ num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
+ vf->hw.tx_queues;
+ vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
+ vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
+ return 0;
+}
+EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ struct otx2_nic *pfvf = dev;
+ struct otx2_snd_queue *sq;
+
+ sq = &pfvf->qset.sq[qidx];
+ sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+ (qidx * pfvf->nix_lmt_size));
+
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQs to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ /* FIXME: set based on NIX_AF_DWRR_RPM_MTU */
+ aq->sq.smq_rr_weight = pfvf->netdev->mtu;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining, a minimum of 2000 unused SQ CQEs
+ * must be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+#define NPA_MAX_BURST 16
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ u64 ptrs[NPA_MAX_BURST];
+ int num_ptrs = 1;
+ dma_addr_t bufptr;
+
+ /* Refill pool with new buffers */
+ while (cq->pool_ptrs) {
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr)) {
+ if (num_ptrs--)
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs,
+ cq->rbpool->lmt_addr);
+ break;
+ }
+ cq->pool_ptrs--;
+ ptrs[num_ptrs] = (u64)bufptr + OTX2_HEAD_ROOM;
+ num_ptrs++;
+ if (num_ptrs == NPA_MAX_BURST || cq->pool_ptrs == 0) {
+ __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,
+ num_ptrs,
+ cq->rbpool->lmt_addr);
+ num_ptrs = 1;
+ }
+ }
+}
+
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
+{
+ struct otx2_nic *pfvf = dev;
+ int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
+ u64 val = 0, tar_addr = 0;
+
+ /* FIXME: val[0:10] LMT_ID.
+ * [12:15] number of LMTSTs in the burst, minus one.
+ * [19:63] data size of each LMTST in the burst except the first.
+ */
+ val = (lmt_id & 0x7FF);
+ /* The target address for the LMTST flush tells HW how many
+ * 128-bit words are present.
+ * tar_addr[6:4] is the size of the first LMTST minus one, in units of 128 bits.
+ */
+ tar_addr |= sq->io_addr | (((size / 16) - 1) & 0x7) << 4;
+ dma_wmb();
+ memcpy(sq->lmt_addr, sq->sqe_base, size);
+ cn10k_lmt_flush(val, tar_addr);
+
+ sq->head++;
+ sq->head &= (sq->sqe_cnt - 1);
+}
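
cn10k_sqe_flush() above packs the LMT id into val[10:0] and the LMTST size into tar_addr[6:4] as (size / 16) - 1, i.e. the number of 128-bit words minus one. A small sketch of that arithmetic for a few SQE sizes; the io_addr value below is hypothetical, not a real NIX_LF_OP_SENDX address:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the val/tar_addr packing in cn10k_sqe_flush() */
static void encode_lmtst(uint64_t io_addr, int lmt_id, int size_bytes,
			 uint64_t *val, uint64_t *tar_addr)
{
	*val = lmt_id & 0x7FF;				/* val[10:0] = LMT_ID */
	*tar_addr = io_addr |				/* tar_addr[6:4] = 128-bit words - 1 */
		    ((uint64_t)(((size_bytes / 16) - 1) & 0x7) << 4);
}

int main(void)
{
	const uint64_t io_addr = 0x840000000000ULL;	/* hypothetical send doorbell address */
	uint64_t val, tar_addr;

	for (int size = 16; size <= 128; size *= 2) {
		encode_lmtst(io_addr, 72, size, &val, &tar_addr);
		printf("size %3d B -> tar_addr[6:4] = %llu\n", size,
		       (unsigned long long)((tar_addr >> 4) & 0x7));
	}
	return 0;
}
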
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
new file mode 100644
index 000000000000..e0bc595cbb78
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Marvell OcteonTx2 RVU Ethernet driver
+ *
+ * Copyright (C) 2020 Marvell.
+ */
+
+#ifndef CN10K_H
+#define CN10K_H
+
+#include "otx2_common.h"
+
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_pf_lmtst_init(struct otx2_nic *pf);
+int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+#endif /* CN10K_H */
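
The per-SQ LMT line budget computed in cn10k_pf_lmtst_init()/cn10k_vf_lmtst_init() is simple integer arithmetic over the mapped region: total lines = region size / LMT_LINE_SIZE (128 B), then the lines left after NIX_LMTID_BASE (72) are split across the TX queues and capped at 32 per SQ. A worked sketch with a hypothetical 64 KB region and 8 TX queues:

#include <stdio.h>

#define LMT_LINE_SIZE	128
#define NIX_LMTID_BASE	72	/* RX + TX + XDP */

int main(void)
{
	int region_size = 64 * 1024;	/* hypothetical LMTST region size in bytes */
	int tx_queues = 8;		/* hypothetical queue count */

	int tot_lmt_lines = region_size / LMT_LINE_SIZE;
	int per_sq = (tot_lmt_lines - NIX_LMTID_BASE) / tx_queues;

	if (per_sq > 32)		/* same cap as the driver's nix_lmt_lines */
		per_sq = 32;

	printf("total lines %d, per-SQ lines %d, per-SQ bytes %d\n",
	       tot_lmt_lines, per_sq, per_sq * LMT_LINE_SIZE);
	return 0;
}
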
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index e6869435e1f3..cf7875d51d87 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -15,6 +15,7 @@
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_struct.h"
+#include "cn10k.h"
static void otx2_nix_rq_op_stats(struct queue_stats *stats,
struct otx2_nic *pfvf, int qidx)
@@ -60,6 +61,19 @@ void otx2_update_lmac_stats(struct otx2_nic *pfvf)
mutex_unlock(&pfvf->mbox.lock);
}
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+
+ if (!netif_running(pfvf->netdev))
+ return;
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_fec_stats(&pfvf->mbox);
+ if (req)
+ otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+}
+
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx)
{
struct otx2_rcv_queue *rq = &pfvf->qset.rq[qidx];
@@ -216,7 +230,6 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- pfvf->max_frs = mtu + OTX2_ETH_HLEN;
req->maxlen = pfvf->max_frs;
err = otx2_sync_mbox_msg(&pfvf->mbox);
@@ -270,14 +283,17 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
return err;
}
-int otx2_set_rss_table(struct otx2_nic *pfvf)
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ const int index = rss->rss_size * ctx_id;
struct mbox *mbox = &pfvf->mbox;
+ struct otx2_rss_ctx *rss_ctx;
struct nix_aq_enq_req *aq;
int idx, err;
mutex_lock(&mbox->lock);
+ rss_ctx = rss->rss_ctx[ctx_id];
/* Get memory to put this msg */
for (idx = 0; idx < rss->rss_size; idx++) {
aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
@@ -297,10 +313,10 @@ int otx2_set_rss_table(struct otx2_nic *pfvf)
}
}
- aq->rss.rq = rss->ind_tbl[idx];
+ aq->rss.rq = rss_ctx->ind_tbl[idx];
/* Fill AQ info */
- aq->qidx = idx;
+ aq->qidx = index + idx;
aq->ctype = NIX_AQ_CTYPE_RSS;
aq->op = NIX_AQ_INSTOP_INIT;
}
@@ -335,9 +351,10 @@ void otx2_set_rss_key(struct otx2_nic *pfvf)
int otx2_rss_init(struct otx2_nic *pfvf)
{
struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ struct otx2_rss_ctx *rss_ctx;
int idx, ret = 0;
- rss->rss_size = sizeof(rss->ind_tbl);
+ rss->rss_size = sizeof(*rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP]);
/* Init RSS key if it is not setup already */
if (!rss->enable)
@@ -345,13 +362,19 @@ int otx2_rss_init(struct otx2_nic *pfvf)
otx2_set_rss_key(pfvf);
if (!netif_is_rxfh_configured(pfvf->netdev)) {
- /* Default indirection table */
+ /* Set RSS group 0 as default indirection table */
+ rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP] = kzalloc(rss->rss_size,
+ GFP_KERNEL);
+ if (!rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP])
+ return -ENOMEM;
+
+ rss_ctx = rss->rss_ctx[DEFAULT_RSS_CONTEXT_GROUP];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] =
+ rss_ctx->ind_tbl[idx] =
ethtool_rxfh_indir_default(idx,
pfvf->hw.rx_queues);
}
- ret = otx2_set_rss_table(pfvf);
+ ret = otx2_set_rss_table(pfvf, DEFAULT_RSS_CONTEXT_GROUP);
if (ret)
return ret;
@@ -473,34 +496,54 @@ void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
(pfvf->hw.cq_ecount_wait - 1));
}
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t iova;
u8 *buf;
- buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN);
+ buf = napi_alloc_frag_align(pool->rbsize, OTX2_ALIGN);
if (unlikely(!buf))
return -ENOMEM;
- buf = PTR_ALIGN(buf, OTX2_ALIGN);
- iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
+ *dma = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
- if (unlikely(dma_mapping_error(pfvf->dev, iova))) {
+ if (unlikely(dma_mapping_error(pfvf->dev, *dma))) {
page_frag_free(buf);
return -ENOMEM;
}
- return iova;
+ return 0;
}
-static dma_addr_t otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool)
+static int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma)
{
- dma_addr_t addr;
+ int ret;
local_bh_disable();
- addr = __otx2_alloc_rbuf(pfvf, pool);
+ ret = __otx2_alloc_rbuf(pfvf, pool, dma);
local_bh_enable();
- return addr;
+ return ret;
+}
+
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma)
+{
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ return -ENOMEM;
+ }
+ return 0;
}
void otx2_tx_timeout(struct net_device *netdev, unsigned int txq)
@@ -562,8 +605,8 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((OTX2_MAX_MTU + OTX2_ETH_HLEN) << 8) |
- OTX2_MIN_MTU;
+ req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
+ | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
@@ -705,9 +748,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
#define RQ_PASS_LVL_AURA (255 - ((95 * 256) / 100)) /* RED when 95% is full */
#define RQ_DROP_LVL_AURA (255 - ((99 * 256) / 100)) /* Drop when 99% is full */
-/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
-#define SEND_CQ_SKID 2000
-
static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
@@ -741,11 +781,48 @@ static int otx2_rq_init(struct otx2_nic *pfvf, u16 qidx, u16 lpb_aura)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
+{
+ struct otx2_nic *pfvf = dev;
+ struct otx2_snd_queue *sq;
+ struct nix_aq_enq_req *aq;
+
+ sq = &pfvf->qset.sq[qidx];
+ sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
+ /* Get memory to put this msg */
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ aq->sq.cq = pfvf->hw.rx_queues + qidx;
+ aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
+ aq->sq.cq_ena = 1;
+ aq->sq.ena = 1;
+ /* Only one SMQ is allocated, map all SQs to that SMQ */
+ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq_rr_quantum = DFLT_RR_QTM;
+ aq->sq.default_chan = pfvf->hw.tx_chan_base;
+ aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
+ aq->sq.sqb_aura = sqb_aura;
+ aq->sq.sq_int_ena = NIX_SQINT_BITS;
+ aq->sq.qint_idx = 0;
+ /* Due to pipelining, a minimum of 2000 unused SQ CQEs
+ * must be maintained to avoid CQ overflow.
+ */
+ aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt));
+
+ /* Fill AQ info */
+ aq->qidx = qidx;
+ aq->ctype = NIX_AQ_CTYPE_SQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
{
struct otx2_qset *qset = &pfvf->qset;
struct otx2_snd_queue *sq;
- struct nix_aq_enq_req *aq;
struct otx2_pool *pool;
int err;
@@ -782,40 +859,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
sq->sqe_thresh = ((sq->num_sqbs * sq->sqe_per_sqb) * 10) / 100;
sq->aura_id = sqb_aura;
sq->aura_fc_addr = pool->fc_addr->base;
- sq->lmt_addr = (__force u64 *)(pfvf->reg_base + LMT_LF_LMTLINEX(qidx));
sq->io_addr = (__force u64)otx2_get_regaddr(pfvf, NIX_LF_OP_SENDX(0));
sq->stats.bytes = 0;
sq->stats.pkts = 0;
- /* Get memory to put this msg */
- aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
- if (!aq)
- return -ENOMEM;
-
- aq->sq.cq = pfvf->hw.rx_queues + qidx;
- aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
- aq->sq.cq_ena = 1;
- aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = DFLT_RR_QTM;
- aq->sq.default_chan = pfvf->hw.tx_chan_base;
- aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
- aq->sq.sqb_aura = sqb_aura;
- aq->sq.sq_int_ena = NIX_SQINT_BITS;
- aq->sq.qint_idx = 0;
- /* Due pipelining impact minimum 2000 unused SQ CQE's
- * need to maintain to avoid CQ overflow.
- */
- aq->sq.cq_limit = ((SEND_CQ_SKID * 256) / (sq->sqe_cnt));
-
- /* Fill AQ info */
- aq->qidx = qidx;
- aq->ctype = NIX_AQ_CTYPE_SQ;
- aq->op = NIX_AQ_INSTOP_INIT;
+ return pfvf->hw_ops->sq_aq_init(pfvf, qidx, sqb_aura);
- return otx2_sync_mbox_msg(&pfvf->mbox);
}
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
@@ -894,7 +944,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
struct refill_work *wrk;
int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- s64 bufptr;
+ dma_addr_t bufptr;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
@@ -904,8 +954,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
free_ptrs = cq->pool_ptrs;
while (cq->pool_ptrs) {
- bufptr = otx2_alloc_rbuf(pfvf, rbpool);
- if (bufptr <= 0) {
+ if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
/* Schedule a WQ if we fails to free atleast half of the
* pointers else enable napi for this RQ.
*/
@@ -920,7 +969,7 @@ static void otx2_pool_refill_task(struct work_struct *work)
}
return;
}
- otx2_aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
cq->refill_task_sched = false;
@@ -987,7 +1036,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
nixlf->sq_cnt = pfvf->hw.tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
- nixlf->rss_grps = 1; /* Single RSS indir table supported, for now */
+ nixlf->rss_grps = MAX_RSS_GROUPS;
nixlf->xqe_sz = NIX_XQESZ_W16;
/* We don't know absolute NPA LF idx attached.
* AF will replace 'RVU_DEFAULT_PF_FUNC' with
@@ -1164,6 +1213,11 @@ static int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
pool->rbsize = buf_size;
+ /* Set LMTST addr for NPA batch free */
+ if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag))
+ pool->lmt_addr = (__force u64 *)((u64)pfvf->hw.npa_lmt_base +
+ (pool_id * LMT_LINE_SIZE));
+
/* Initialize this pool's context via AF */
aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
if (!aq) {
@@ -1204,8 +1258,8 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
struct otx2_hw *hw = &pfvf->hw;
struct otx2_snd_queue *sq;
struct otx2_pool *pool;
+ dma_addr_t bufptr;
int err, ptr;
- s64 bufptr;
/* Calculate number of SQBs needed.
*
@@ -1250,10 +1304,9 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
return -ENOMEM;
for (ptr = 0; ptr < num_sqbs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id, bufptr);
+ if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+ return -ENOMEM;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
}
}
@@ -1271,7 +1324,7 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
int stack_pages, pool_id, rq;
struct otx2_pool *pool;
int err, ptr, num_ptrs;
- s64 bufptr;
+ dma_addr_t bufptr;
num_ptrs = pfvf->qset.rqe_cnt;
@@ -1301,11 +1354,10 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
pool = &pfvf->qset.pool[pool_id];
for (ptr = 0; ptr < num_ptrs; ptr++) {
- bufptr = otx2_alloc_rbuf(pfvf, pool);
- if (bufptr <= 0)
- return bufptr;
- otx2_aura_freeptr(pfvf, pool_id,
- bufptr + OTX2_HEAD_ROOM);
+ if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+ return -ENOMEM;
+ pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
+ bufptr + OTX2_HEAD_ROOM);
}
}
@@ -1482,6 +1534,13 @@ void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
pfvf->hw.cgx_tx_stats[id] = rsp->tx_stats[id];
}
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp)
+{
+ pfvf->hw.cgx_fec_corr_blks += rsp->fec_corr_blks;
+ pfvf->hw.cgx_fec_uncorr_blks += rsp->fec_uncorr_blks;
+}
+
void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp)
{
@@ -1577,6 +1636,46 @@ void otx2_set_cints_affinity(struct otx2_nic *pfvf)
}
}
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf)
+{
+ struct nix_hw_info *rsp;
+ struct msg_req *req;
+ u16 max_mtu;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_get_hw_info(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!rc) {
+ rsp = (struct nix_hw_info *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+
+ /* HW counts VLAN insertion bytes (8 for a double tag)
+ * irrespective of whether the SQE requests VLAN insertion
+ * in the packet or not. Hence these 8 bytes have to be
+ * discounted from the max packet size, otherwise HW will
+ * throw SMQ errors.
+ */
+ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to get MTU from hardware setting default value(1500)\n");
+ max_mtu = 1500;
+ }
+ return max_mtu;
+}
+EXPORT_SYMBOL(otx2_get_max_mtu);
+
#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
int __weak \
otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
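
The cq_limit programmed in otx2_sq_aq_init()/cn10k_sq_aq_init() scales the 2000-CQE send skid into the 8-bit threshold the hardware expects: cq_limit = SEND_CQ_SKID * 256 / sqe_cnt, so roughly the same number of CQEs stays free regardless of CQ size. A worked check of that formula for a 4K-entry CQ, the case the SEND_CQ_SKID comment refers to:

#include <stdio.h>

#define SEND_CQ_SKID	2000	/* unused CQEs to keep free, per the driver comment */

int main(void)
{
	int sqe_cnt = 4096;	/* 4K CQEs */
	int cq_limit = (SEND_CQ_SKID * 256) / sqe_cnt;

	/* Back out how many CQEs that threshold actually reserves */
	printf("cq_limit = %d (reserves about %d of %d CQEs)\n",
	       cq_limit, cq_limit * sqe_cnt / 256, sqe_cnt);
	return 0;
}
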
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 103430400a8a..a518c2283f18 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -50,14 +50,21 @@ enum arua_mapped_qtypes {
#define NIX_LF_ERR_VEC 0x81
#define NIX_LF_POISON_VEC 0x82
+/* Send skid of 2000 packets required for CQ size of 4K CQEs. */
+#define SEND_CQ_SKID 2000
+
/* RSS configuration */
+struct otx2_rss_ctx {
+ u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
+};
+
struct otx2_rss_info {
u8 enable;
u32 flowkey_cfg;
u16 rss_size;
- u8 ind_tbl[MAX_RSS_INDIR_TBL_SIZE];
#define RSS_HASH_KEY_SIZE 44 /* 352 bit key */
u8 key[RSS_HASH_KEY_SIZE];
+ struct otx2_rss_ctx *rss_ctx[MAX_RSS_GROUPS];
};
/* NIX (or NPC) RX errors */
@@ -186,7 +193,6 @@ struct otx2_hw {
u8 lso_tsov6_idx;
u8 lso_udpv4_idx;
u8 lso_udpv6_idx;
- u8 hw_tso;
/* MSI-X */
u8 cint_cnt; /* CQ interrupt count */
@@ -200,8 +206,20 @@ struct otx2_hw {
struct otx2_drv_stats drv_stats;
u64 cgx_rx_stats[CGX_RX_STATS_COUNT];
u64 cgx_tx_stats[CGX_TX_STATS_COUNT];
+ u64 cgx_fec_corr_blks;
+ u64 cgx_fec_uncorr_blks;
u8 cgx_links; /* No. of CGX links present in HW */
u8 lbk_links; /* No. of LBK links present in HW */
+#define HW_TSO 0
+#define CN10K_MBOX 1
+#define CN10K_LMTST 2
+ unsigned long cap_flag;
+
+#define LMT_LINE_SIZE 128
+#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
+ void __iomem *lmt_base;
+ u64 *npa_lmt_base;
+ u64 *nix_lmt_base;
};
struct otx2_vf_config {
@@ -260,9 +278,18 @@ struct otx2_flow_config {
struct list_head flow_list;
};
+struct dev_hw_ops {
+ int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
+ void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+ void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ void (*aura_freeptr)(void *dev, int aura, u64 buf);
+};
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
+ struct dev_hw_ops *hw_ops;
void *iommu_domain;
u16 max_frs;
u16 rbsize; /* Receive buffer size */
@@ -311,6 +338,10 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+ /* LMTST Lines info */
+ u16 tot_lmt_lines;
+ u16 nix_lmt_lines;
+ u32 nix_lmt_size;
struct otx2_ptp *ptp;
struct hwtstamp_config tstamp;
@@ -335,6 +366,25 @@ static inline bool is_96xx_B0(struct pci_dev *pdev)
(pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);
}
+/* REVID for PCIe devices.
+ * Bits 0..1: minor pass, bits 3..2: major pass,
+ * bits 7..4: midr id.
+ */
+#define PCI_REVISION_ID_96XX 0x00
+#define PCI_REVISION_ID_95XX 0x10
+#define PCI_REVISION_ID_LOKI 0x20
+#define PCI_REVISION_ID_98XX 0x30
+#define PCI_REVISION_ID_95XXMM 0x40
+
+static inline bool is_dev_otx2(struct pci_dev *pdev)
+{
+ u8 midr = pdev->revision & 0xF0;
+
+ return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX ||
+ midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX ||
+ midr == PCI_REVISION_ID_95XXMM);
+}
+
static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
@@ -343,10 +393,10 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;
- hw->hw_tso = true;
+ __set_bit(HW_TSO, &hw->cap_flag);
if (is_96xx_A0(pfvf->pdev)) {
- hw->hw_tso = false;
+ __clear_bit(HW_TSO, &hw->cap_flag);
/* Time based irq coalescing is not supported */
pfvf->hw.cq_qcount_wait = 0x0;
@@ -357,6 +407,13 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
pfvf->hw.rq_skid = 600;
pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
}
+ if (is_96xx_B0(pfvf->pdev))
+ __clear_bit(HW_TSO, &hw->cap_flag);
+
+ if (!is_dev_otx2(pfvf->pdev)) {
+ __set_bit(CN10K_MBOX, &hw->cap_flag);
+ __set_bit(CN10K_LMTST, &hw->cap_flag);
+ }
}
/* Register read/write APIs */
@@ -465,10 +522,51 @@ static inline u64 otx2_atomic64_add(u64 incr, u64 *ptr)
}
#else
-#define otx2_write128(lo, hi, addr)
+#define otx2_write128(lo, hi, addr) writeq((hi) | (lo), addr)
#define otx2_atomic64_add(incr, ptr) ({ *ptr += incr; })
#endif
+static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
+ u64 *ptrs, u64 num_ptrs,
+ u64 *lmt_addr)
+{
+ u64 size = 0, count_eot = 0;
+ u64 tar_addr, val = 0;
+
+ tar_addr = (__force u64)otx2_get_regaddr(pfvf, NPA_LF_AURA_BATCH_FREE0);
+ /* LMTID is same as AURA Id */
+ val = (aura & 0x7FF) | BIT_ULL(63);
+ /* Set if [127:64] of last 128bit word has a valid pointer */
+ count_eot = (num_ptrs % 2) ? 0ULL : 1ULL;
+ /* Set AURA ID to free pointer */
+ ptrs[0] = (count_eot << 32) | (aura & 0xFFFFF);
+ /* Target address for LMTST flush tells HW how many 128bit
+ * words are valid from NPA_LF_AURA_BATCH_FREE0.
+ *
+ * tar_addr[6:4] is LMTST size-1 in units of 128b.
+ */
+ if (num_ptrs > 2) {
+ size = (sizeof(u64) * num_ptrs) / 16;
+ if (!count_eot)
+ size++;
+ tar_addr |= ((size - 1) & 0x7) << 4;
+ }
+ memcpy(lmt_addr, ptrs, sizeof(u64) * num_ptrs);
+ /* Perform LMTST flush */
+ cn10k_lmt_flush(val, tar_addr);
+}
+
+static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
+{
+ struct otx2_nic *pfvf = dev;
+ struct otx2_pool *pool;
+ u64 ptrs[2];
+
+ pool = &pfvf->qset.pool[aura];
+ ptrs[1] = buf;
+ __cn10k_aura_freeptr(pfvf, aura, ptrs, 2, pool->lmt_addr);
+}
+
/* Alloc pointer from pool/aura */
static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
{
@@ -480,11 +578,12 @@ static inline u64 otx2_aura_allocptr(struct otx2_nic *pfvf, int aura)
}
/* Free pointer to a pool/aura */
-static inline void otx2_aura_freeptr(struct otx2_nic *pfvf,
- int aura, s64 buf)
+static inline void otx2_aura_freeptr(void *dev, int aura, u64 buf)
{
- otx2_write128((u64)buf, (u64)aura | BIT_ULL(63),
- otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0));
+ struct otx2_nic *pfvf = dev;
+ void __iomem *addr = otx2_get_regaddr(pfvf, NPA_LF_AURA_OP_FREE0);
+
+ otx2_write128(buf, (u64)aura | BIT_ULL(63), addr);
}
static inline int otx2_get_pool_idx(struct otx2_nic *pfvf, int type, int idx)
@@ -632,18 +731,23 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl);
int otx2_txsch_alloc(struct otx2_nic *pfvf);
int otx2_txschq_stop(struct otx2_nic *pfvf);
void otx2_sqb_flush(struct otx2_nic *pfvf);
-dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool);
+int __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
+ dma_addr_t *dma);
int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable);
void otx2_ctx_disable(struct mbox *mbox, int type, bool npa);
int otx2_nix_config_bp(struct otx2_nic *pfvf, bool enable);
void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq);
+int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
+int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
+ dma_addr_t *dma);
/* RSS configuration APIs*/
int otx2_rss_init(struct otx2_nic *pfvf);
int otx2_set_flowkey_cfg(struct otx2_nic *pfvf);
void otx2_set_rss_key(struct otx2_nic *pfvf);
-int otx2_set_rss_table(struct otx2_nic *pfvf);
+int otx2_set_rss_table(struct otx2_nic *pfvf, int ctx_id);
/* Mbox handlers */
void mbox_handler_msix_offset(struct otx2_nic *pfvf,
@@ -656,6 +760,9 @@ void mbox_handler_nix_txsch_alloc(struct otx2_nic *pf,
struct nix_txsch_alloc_rsp *rsp);
void mbox_handler_cgx_stats(struct otx2_nic *pfvf,
struct cgx_stats_rsp *rsp);
+void mbox_handler_cgx_fec_stats(struct otx2_nic *pfvf,
+ struct cgx_fec_stats_rsp *rsp);
+void otx2_set_fec_stats_count(struct otx2_nic *pfvf);
void mbox_handler_nix_bp_enable(struct otx2_nic *pfvf,
struct nix_bp_cfg_rsp *rsp);
@@ -664,6 +771,7 @@ void otx2_get_dev_stats(struct otx2_nic *pfvf);
void otx2_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
void otx2_update_lmac_stats(struct otx2_nic *pfvf);
+void otx2_update_lmac_fec_stats(struct otx2_nic *pfvf);
int otx2_update_rq_stats(struct otx2_nic *pfvf, int qidx);
int otx2_update_sq_stats(struct otx2_nic *pfvf, int qidx);
void otx2_set_ethtool_ops(struct net_device *netdev);
@@ -684,13 +792,14 @@ int otx2_get_flow(struct otx2_nic *pfvf,
int otx2_get_all_flows(struct otx2_nic *pfvf,
struct ethtool_rxnfc *nfc, u32 *rule_locs);
int otx2_add_flow(struct otx2_nic *pfvf,
- struct ethtool_rx_flow_spec *fsp);
+ struct ethtool_rxnfc *nfc);
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location);
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct npc_install_flow_req *req);
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id);
int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
-
+u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
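
struct dev_hw_ops above turns the silicon differences into an ops table: cn10k_pf_lmtst_init() points pf->hw_ops at either otx2_hw_ops or cn10k_hw_ops based on the CN10K_LMTST capability bit, and the data path only ever calls through hw_ops. A stripped-down sketch of that selection pattern; the two freeing routines here are dummies, not the driver's implementations:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct example_hw_ops {
	void (*aura_freeptr)(int aura, uint64_t buf);
};

static void legacy_freeptr(int aura, uint64_t buf)
{
	printf("legacy 128-bit store: aura %d buf %#llx\n", aura,
	       (unsigned long long)buf);
}

static void lmtst_freeptr(int aura, uint64_t buf)
{
	printf("CN10K LMTST batch free: aura %d buf %#llx\n", aura,
	       (unsigned long long)buf);
}

static const struct example_hw_ops legacy_ops = { .aura_freeptr = legacy_freeptr };
static const struct example_hw_ops lmtst_ops  = { .aura_freeptr = lmtst_freeptr };

int main(void)
{
	bool has_lmtst = true;	/* stand-in for test_bit(CN10K_LMTST, cap_flag) */
	const struct example_hw_ops *ops = has_lmtst ? &lmtst_ops : &legacy_ops;

	ops->aura_freeptr(3, 0x1000);	/* callers never branch on the silicon type */
	return 0;
}
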
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 67171b66a56c..f4962a97a075 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/log2.h>
#include <linux/net_tstamp.h>
+#include <linux/linkmode.h>
#include "otx2_common.h"
#include "otx2_ptp.h"
@@ -32,6 +33,14 @@ struct otx2_stat {
.index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \
}
+/* Physical link config */
+#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF //110001110001100110010111111
+
+enum link_mode {
+ OTX2_MODE_SUPPORTED,
+ OTX2_MODE_ADVERTISED
+};
+
static const struct otx2_stat otx2_dev_stats[] = {
OTX2_DEV_STAT(rx_ucast_frames),
OTX2_DEV_STAT(rx_bcast_frames),
@@ -66,6 +75,8 @@ static const unsigned int otx2_n_dev_stats = ARRAY_SIZE(otx2_dev_stats);
static const unsigned int otx2_n_drv_stats = ARRAY_SIZE(otx2_drv_stats);
static const unsigned int otx2_n_queue_stats = ARRAY_SIZE(otx2_queue_stats);
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf);
+
static void otx2_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -128,6 +139,10 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
strcpy(data, "reset_count");
data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Corrected Errors: ");
+ data += ETH_GSTRING_LEN;
+ sprintf(data, "Fec Uncorrected Errors: ");
+ data += ETH_GSTRING_LEN;
}
static void otx2_get_qset_stats(struct otx2_nic *pfvf,
@@ -160,11 +175,30 @@ static void otx2_get_qset_stats(struct otx2_nic *pfvf,
}
}
+static int otx2_get_phy_fec_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int rc = -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_phy_fec_stats(&pfvf->mbox);
+ if (!req)
+ goto end;
+
+ if (!otx2_sync_mbox_msg(&pfvf->mbox))
+ rc = 0;
+end:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
/* Get device and per queue statistics */
static void otx2_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
+ u64 fec_corr_blks, fec_uncorr_blks;
+ struct cgx_fw_data *rsp;
int stat;
otx2_get_dev_stats(pfvf);
@@ -183,6 +217,32 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
*(data++) = pfvf->hw.cgx_tx_stats[stat];
*(data++) = pfvf->reset_count;
+
+ fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
+ fec_uncorr_blks = pfvf->hw.cgx_fec_uncorr_blks;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp) && rsp->fwdata.phy.misc.has_fec_stats &&
+ !otx2_get_phy_fec_stats(pfvf)) {
+ /* Fetch fwdata again because it's been recently populated with
+ * latest PHY FEC stats.
+ */
+ rsp = otx2_get_fwdata(pfvf);
+ if (!IS_ERR(rsp)) {
+ struct fec_stats_s *p = &rsp->fwdata.phy.fec_stats;
+
+ if (pfvf->linfo.fec == OTX2_FEC_BASER) {
+ fec_corr_blks = p->brfec_corr_blks;
+ fec_uncorr_blks = p->brfec_uncorr_blks;
+ } else {
+ fec_corr_blks = p->rsfec_corr_cws;
+ fec_uncorr_blks = p->rsfec_uncorr_cws;
+ }
+ }
+ }
+
+ *(data++) = fec_corr_blks;
+ *(data++) = fec_uncorr_blks;
}
static int otx2_get_sset_count(struct net_device *netdev, int sset)
@@ -195,9 +255,11 @@ static int otx2_get_sset_count(struct net_device *netdev, int sset)
qstats_count = otx2_n_queue_stats *
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + 1;
+ CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
+ + 1;
}
/* Get no of queues device supports and current queue count */
@@ -448,10 +510,14 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_ESP)
+ nfc->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
- case AH_ESP_V6_FLOW:
+ break;
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
@@ -459,6 +525,7 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
default:
return -EINVAL;
}
+
return 0;
}
@@ -527,6 +594,36 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
return -EINVAL;
}
break;
+ case AH_ESP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ switch (nfc->data & rxh_l4) {
+ case 0:
+ rss_cfg &= ~(NIX_FLOW_KEY_TYPE_ESP |
+ NIX_FLOW_KEY_TYPE_AH);
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN |
+ NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+			/* If VLAN hashing is also requested for ESP then do not
+			 * allow it, because of the hardware's 40-byte flow key
+			 * limit.
+			 */
+ if (rss_cfg & NIX_FLOW_KEY_TYPE_VLAN) {
+ netdev_err(pfvf->netdev,
+ "RSS hash of ESP or AH with VLAN is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rss_cfg |= NIX_FLOW_KEY_TYPE_ESP | NIX_FLOW_KEY_TYPE_AH;
+			/* Disable IPv4 proto hashing since IPv6 SA+DA (32 bytes)
+			 * and ESP SPI+sequence (8 bytes) use up the hardware's
+			 * 40-byte flow key limit.
+			 */
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_IPV4_PROTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case IPV4_FLOW:
case IPV6_FLOW:
rss_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6;
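The AH/ESP cases added above refuse to combine ESP/AH hashing with VLAN hashing because the NIX flow key is capped at 40 bytes. A minimal, standalone C sketch of that byte budget follows; the field sizes (IPv6 SA+DA, ESP SPI+sequence, VLAN TCI) are standard protocol sizes assumed here for illustration, not values read from the driver or its flow-key microcode.

/* Illustrative only; not part of the patch. */
#include <stdio.h>

int main(void)
{
	const int flow_key_max = 40;		/* hardware flow key limit, per driver comment */
	const int ipv6_sa_da   = 16 + 16;	/* IPv6 source + destination address */
	const int esp_spi_seq  = 4 + 4;		/* ESP SPI + sequence number */
	const int vlan_tci     = 2;		/* what VLAN hashing would additionally need */
	int used = ipv6_sa_da + esp_spi_seq;
	int left = flow_key_max - used;

	printf("used %d of %d bytes, %d left, VLAN needs %d\n",
	       used, flow_key_max, left, vlan_tci);
	/* 32 + 8 = 40: nothing left for the 2-byte VLAN TCI, which is why
	 * the code above returns -EOPNOTSUPP when VLAN hashing is also set.
	 */
	return 0;
}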
@@ -581,7 +678,7 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc)
break;
case ETHTOOL_SRXCLSRLINS:
if (netif_running(dev) && ntuple)
- ret = otx2_add_flow(pfvf, &nfc->fs);
+ ret = otx2_add_flow(pfvf, nfc);
break;
case ETHTOOL_SRXCLSRLDEL:
if (netif_running(dev) && ntuple)
@@ -641,42 +738,50 @@ static u32 otx2_get_rxfh_key_size(struct net_device *netdev)
static u32 otx2_get_rxfh_indir_size(struct net_device *dev)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
-
- return pfvf->hw.rss_info.rss_size;
+ return MAX_RSS_INDIR_TBL_SIZE;
}
-/* Get RSS configuration */
-static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
- u8 *hkey, u8 *hfunc)
+static int otx2_rss_ctx_delete(struct otx2_nic *pfvf, int ctx_id)
{
- struct otx2_nic *pfvf = netdev_priv(dev);
- struct otx2_rss_info *rss;
- int idx;
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
- rss = &pfvf->hw.rss_info;
+ otx2_rss_ctx_flow_del(pfvf, ctx_id);
+ kfree(rss->rss_ctx[ctx_id]);
+ rss->rss_ctx[ctx_id] = NULL;
- if (indir) {
- for (idx = 0; idx < rss->rss_size; idx++)
- indir[idx] = rss->ind_tbl[idx];
- }
+ return 0;
+}
- if (hkey)
- memcpy(hkey, rss->key, sizeof(rss->key));
+static int otx2_rss_ctx_create(struct otx2_nic *pfvf,
+ u32 *rss_context)
+{
+ struct otx2_rss_info *rss = &pfvf->hw.rss_info;
+ u8 ctx;
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
+ for (ctx = 0; ctx < MAX_RSS_GROUPS; ctx++) {
+ if (!rss->rss_ctx[ctx])
+ break;
+ }
+ if (ctx == MAX_RSS_GROUPS)
+ return -EINVAL;
+
+ rss->rss_ctx[ctx] = kzalloc(sizeof(*rss->rss_ctx[ctx]), GFP_KERNEL);
+ if (!rss->rss_ctx[ctx])
+ return -ENOMEM;
+ *rss_context = ctx;
return 0;
}
-/* Configure RSS table and hash key */
-static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, const u8 hfunc)
+/* RSS context configuration */
+static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc,
+ u32 *rss_context, bool delete)
{
struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
struct otx2_rss_info *rss;
- int idx;
+ int ret, idx;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
@@ -688,20 +793,85 @@ static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
return -EIO;
}
+ if (hkey) {
+ memcpy(rss->key, hkey, sizeof(rss->key));
+ otx2_set_rss_key(pfvf);
+ }
+ if (delete)
+ return otx2_rss_ctx_delete(pfvf, *rss_context);
+
+ if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+ ret = otx2_rss_ctx_create(pfvf, rss_context);
+ if (ret)
+ return ret;
+ }
if (indir) {
+ rss_ctx = rss->rss_ctx[*rss_context];
for (idx = 0; idx < rss->rss_size; idx++)
- rss->ind_tbl[idx] = indir[idx];
+ rss_ctx->ind_tbl[idx] = indir[idx];
}
+ otx2_set_rss_table(pfvf, *rss_context);
- if (hkey) {
- memcpy(rss->key, hkey, sizeof(rss->key));
- otx2_set_rss_key(pfvf);
+ return 0;
+}
+
+static int otx2_get_rxfh_context(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc, u32 rss_context)
+{
+ struct otx2_nic *pfvf = netdev_priv(dev);
+ struct otx2_rss_ctx *rss_ctx;
+ struct otx2_rss_info *rss;
+ int idx, rx_queues;
+
+ rss = &pfvf->hw.rss_info;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ if (!rss->enable && rss_context == DEFAULT_RSS_CONTEXT_GROUP) {
+ rx_queues = pfvf->hw.rx_queues;
+ for (idx = 0; idx < MAX_RSS_INDIR_TBL_SIZE; idx++)
+ indir[idx] = ethtool_rxfh_indir_default(idx, rx_queues);
+ return 0;
+ }
+ if (rss_context >= MAX_RSS_GROUPS)
+ return -ENOENT;
+
+ rss_ctx = rss->rss_ctx[rss_context];
+ if (!rss_ctx)
+ return -ENOENT;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss_ctx->ind_tbl[idx];
}
+ if (hkey)
+ memcpy(hkey, rss->key, sizeof(rss->key));
- otx2_set_rss_table(pfvf);
return 0;
}
+/* Get RSS configuration */
+static int otx2_get_rxfh(struct net_device *dev, u32 *indir,
+ u8 *hkey, u8 *hfunc)
+{
+ return otx2_get_rxfh_context(dev, indir, hkey, hfunc,
+ DEFAULT_RSS_CONTEXT_GROUP);
+}
+
+/* Configure RSS table and hash key */
+static int otx2_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+
+ u32 rss_context = DEFAULT_RSS_CONTEXT_GROUP;
+
+ return otx2_set_rxfh_context(dev, indir, hkey, hfunc, &rss_context, 0);
+}
+
static u32 otx2_get_msglevel(struct net_device *netdev)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
@@ -751,6 +921,304 @@ static int otx2_get_ts_info(struct net_device *netdev,
return 0;
}
+static struct cgx_fw_data *otx2_get_fwdata(struct otx2_nic *pfvf)
+{
+ struct cgx_fw_data *rsp = NULL;
+ struct msg_req *req;
+ int err = 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_get_aux_link_info(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (!err) {
+ rsp = (struct cgx_fw_data *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ } else {
+ rsp = ERR_PTR(err);
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rsp;
+}
+
+static int otx2_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp;
+ const int fec[] = {
+ ETHTOOL_FEC_OFF,
+ ETHTOOL_FEC_BASER,
+ ETHTOOL_FEC_RS,
+ ETHTOOL_FEC_BASER | ETHTOOL_FEC_RS};
+#define FEC_MAX_INDEX 4
+ if (pfvf->linfo.fec < FEC_MAX_INDEX)
+ fecparam->active_fec = fec[pfvf->linfo.fec];
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_fec < FEC_MAX_INDEX) {
+ if (!rsp->fwdata.supported_fec)
+ fecparam->fec = ETHTOOL_FEC_NONE;
+ else
+ fecparam->fec = fec[rsp->fwdata.supported_fec];
+ }
+ return 0;
+}
+
+static int otx2_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct mbox *mbox = &pfvf->mbox;
+ struct fec_mode *req, *rsp;
+ int err = 0, fec = 0;
+
+ switch (fecparam->fec) {
+	/* Firmware does not support AUTO mode; treat it as FEC_OFF */
+ case ETHTOOL_FEC_OFF:
+ case ETHTOOL_FEC_AUTO:
+ fec = OTX2_FEC_OFF;
+ break;
+ case ETHTOOL_FEC_RS:
+ fec = OTX2_FEC_RS;
+ break;
+ case ETHTOOL_FEC_BASER:
+ fec = OTX2_FEC_BASER;
+ break;
+ default:
+ netdev_warn(pfvf->netdev, "Unsupported FEC mode: %d",
+ fecparam->fec);
+ return -EINVAL;
+ }
+
+ if (fec == pfvf->linfo.fec)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_fec_param(&pfvf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+ req->fec = fec;
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto end;
+
+ rsp = (struct fec_mode *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (rsp->fec >= 0)
+ pfvf->linfo.fec = rsp->fec;
+ else
+ err = rsp->fec;
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
+static void otx2_get_fec_info(u64 index, int req_mode,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_fec_modes) = { 0, };
+
+ switch (index) {
+ case OTX2_FEC_NONE:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ case OTX2_FEC_BASER | OTX2_FEC_RS:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ otx2_fec_modes);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ otx2_fec_modes);
+ break;
+ }
+
+ /* Add fec modes to existing modes */
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_or(link_ksettings->link_modes.advertising,
+ link_ksettings->link_modes.advertising,
+ otx2_fec_modes);
+ else
+ linkmode_or(link_ksettings->link_modes.supported,
+ link_ksettings->link_modes.supported,
+ otx2_fec_modes);
+}
+
+static void otx2_get_link_mode_info(u64 link_mode_bmap,
+ bool req_mode,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(otx2_link_modes) = { 0, };
+ const int otx2_sgmii_features[6] = {
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ };
+ /* CGX link modes to Ethtool link mode mapping */
+ const int cgx_link_mode[27] = {
+ 0, /* SGMII Mode */
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ 0,
+ 0,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ 0,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
+ };
+ u8 bit;
+
+ link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES;
+
+ for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) {
+ /* SGMII mode is set */
+ if (bit == 0)
+ linkmode_set_bit_array(otx2_sgmii_features,
+ ARRAY_SIZE(otx2_sgmii_features),
+ otx2_link_modes);
+ else
+ linkmode_set_bit(cgx_link_mode[bit], otx2_link_modes);
+ }
+
+ if (req_mode == OTX2_MODE_ADVERTISED)
+ linkmode_copy(link_ksettings->link_modes.advertising,
+ otx2_link_modes);
+ else
+ linkmode_copy(link_ksettings->link_modes.supported,
+ otx2_link_modes);
+}
+
+static int otx2_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct cgx_fw_data *rsp = NULL;
+
+ cmd->base.duplex = pfvf->linfo.full_duplex;
+ cmd->base.speed = pfvf->linfo.speed;
+ cmd->base.autoneg = pfvf->linfo.an;
+
+ rsp = otx2_get_fwdata(pfvf);
+ if (IS_ERR(rsp))
+ return PTR_ERR(rsp);
+
+ if (rsp->fwdata.supported_an)
+ ethtool_link_ksettings_add_link_mode(cmd,
+ supported,
+ Autoneg);
+
+ otx2_get_link_mode_info(rsp->fwdata.advertised_link_modes,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_fec_info(rsp->fwdata.advertised_fec,
+ OTX2_MODE_ADVERTISED, cmd);
+ otx2_get_link_mode_info(rsp->fwdata.supported_link_modes,
+ OTX2_MODE_SUPPORTED, cmd);
+ otx2_get_fec_info(rsp->fwdata.supported_fec,
+ OTX2_MODE_SUPPORTED, cmd);
+ return 0;
+}
+
+static void otx2_get_advertised_mode(const struct ethtool_link_ksettings *cmd,
+ u64 *mode)
+{
+ u32 bit_pos;
+
+	/* Firmware does not support requesting multiple advertised modes,
+	 * so return the first set bit.
+	 */
+ bit_pos = find_first_bit(cmd->link_modes.advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (bit_pos != __ETHTOOL_LINK_MODE_MASK_NBITS)
+ *mode = bit_pos;
+}
+
+static int otx2_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct ethtool_link_ksettings cur_ks;
+ struct cgx_set_link_mode_req *req;
+ struct mbox *mbox = &pf->mbox;
+ int err = 0;
+
+ memset(&cur_ks, 0, sizeof(struct ethtool_link_ksettings));
+
+ if (!ethtool_validate_speed(cmd->base.speed) ||
+ !ethtool_validate_duplex(cmd->base.duplex))
+ return -EINVAL;
+
+ if (cmd->base.autoneg != AUTONEG_ENABLE &&
+ cmd->base.autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ otx2_get_link_ksettings(netdev, &cur_ks);
+
+ /* Check requested modes against supported modes by hardware */
+ if (!bitmap_subset(cmd->link_modes.advertising,
+ cur_ks.link_modes.supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
+ return -EINVAL;
+
+ mutex_lock(&mbox->lock);
+ req = otx2_mbox_alloc_msg_cgx_set_link_mode(&pf->mbox);
+ if (!req) {
+ err = -ENOMEM;
+ goto end;
+ }
+
+ req->args.speed = cmd->base.speed;
+	/* Firmware expects 1 for half duplex and 0 for full duplex,
+	 * hence the inversion.
+	 */
+ req->args.duplex = cmd->base.duplex ^ 0x1;
+ req->args.an = cmd->base.autoneg;
+ otx2_get_advertised_mode(cmd, &req->args.mode);
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+end:
+ mutex_unlock(&mbox->lock);
+ return err;
+}
+
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
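otx2_set_link_ksettings() above flips the duplex value before handing it to firmware. The user-space sketch below illustrates that inversion, assuming the DUPLEX_HALF/DUPLEX_FULL encodings from the ethtool UAPI (0 and 1); it is an illustration only, not part of the patch.

#include <stdio.h>

#define DUPLEX_HALF	0x00	/* ethtool UAPI value */
#define DUPLEX_FULL	0x01	/* ethtool UAPI value */

int main(void)
{
	unsigned int duplex[] = { DUPLEX_HALF, DUPLEX_FULL };
	int i;

	for (i = 0; i < 2; i++)
		printf("ethtool duplex %u -> firmware duplex %u\n",
		       duplex[i], duplex[i] ^ 0x1);
	/* DUPLEX_FULL (1) becomes 0 and DUPLEX_HALF (0) becomes 1,
	 * which is what the firmware expects per the driver comment.
	 */
	return 0;
}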
@@ -771,11 +1239,17 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_msglevel = otx2_get_msglevel,
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_ts_info = otx2_get_ts_info,
+ .get_fecparam = otx2_get_fecparam,
+ .set_fecparam = otx2_set_fecparam,
+ .get_link_ksettings = otx2_get_link_ksettings,
+ .set_link_ksettings = otx2_set_link_ksettings,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
@@ -850,6 +1324,20 @@ static int otx2vf_get_sset_count(struct net_device *netdev, int sset)
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count + 1;
}
+static int otx2vf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.speed = SPEED_100000;
+ } else {
+ return otx2_get_link_ksettings(netdev, cmd);
+ }
+ return 0;
+}
+
static const struct ethtool_ops otx2vf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -866,6 +1354,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_rxfh_indir_size = otx2_get_rxfh_indir_size,
.get_rxfh = otx2_get_rxfh,
.set_rxfh = otx2_set_rxfh,
+ .get_rxfh_context = otx2_get_rxfh_context,
+ .set_rxfh_context = otx2_set_rxfh_context,
.get_ringparam = otx2_get_ringparam,
.set_ringparam = otx2_set_ringparam,
.get_coalesce = otx2_get_coalesce,
@@ -874,6 +1364,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_link_ksettings = otx2vf_get_link_ksettings,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index be8ccfce1848..0dbbf38e0597 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2020 Marvell.
*/
@@ -16,6 +16,7 @@ struct otx2_flow {
u32 location;
u16 entry;
bool is_vf;
+ u8 rss_ctx_id;
int vf;
};
@@ -245,6 +246,7 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
if (iter->location == location) {
nfc->fs = iter->flow_spec;
+ nfc->rss_context = iter->rss_ctx_id;
return 0;
}
}
@@ -270,14 +272,16 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
return err;
}
-static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
+ struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -297,10 +301,16 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip4dst));
req->features |= BIT_ULL(NPC_DIP_IPV4);
}
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (ipv4_l4_mask->ip4src) {
memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
sizeof(pkt->ip4src));
@@ -339,20 +349,60 @@ static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IP);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (ah_esp_mask->ip4src) {
+ memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
+ sizeof(pkt->ip4src));
+ memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
+ sizeof(pmask->ip4src));
+ req->features |= BIT_ULL(NPC_SIP_IPV4);
+ }
+ if (ah_esp_mask->ip4dst) {
+ memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
+ sizeof(pkt->ip4dst));
+ memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
+ sizeof(pmask->ip4dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV4);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+		    (ah_esp_mask->tos & ah_esp_hdr->tos))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V4_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_ESP);
break;
default:
break;
}
+
+ return 0;
}
-static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
- struct npc_install_flow_req *req,
- u32 flow_type)
+static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
+ struct npc_install_flow_req *req,
+ u32 flow_type)
{
struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
+ struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
@@ -372,10 +422,16 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
sizeof(pmask->ip6dst));
req->features |= BIT_ULL(NPC_DIP_IPV6);
}
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
break;
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
sizeof(pkt->ip6src));
@@ -414,10 +470,47 @@ static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
else
req->features |= BIT_ULL(NPC_DPORT_SCTP);
}
+ if (flow_type == UDP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_UDP);
+ else if (flow_type == TCP_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_TCP);
+ else
+ req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ pkt->etype = cpu_to_be16(ETH_P_IPV6);
+ pmask->etype = cpu_to_be16(0xFFFF);
+ req->features |= BIT_ULL(NPC_ETYPE);
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
+ memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
+ sizeof(pkt->ip6src));
+ memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
+ sizeof(pmask->ip6src));
+ req->features |= BIT_ULL(NPC_SIP_IPV6);
+ }
+ if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
+ memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
+ sizeof(pkt->ip6dst));
+ memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
+ sizeof(pmask->ip6dst));
+ req->features |= BIT_ULL(NPC_DIP_IPV6);
+ }
+
+ /* NPC profile doesn't extract AH/ESP header fields */
+ if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
+		    (ah_esp_mask->tclass & ah_esp_hdr->tclass))
+ return -EOPNOTSUPP;
+
+ if (flow_type == AH_V6_FLOW)
+ req->features |= BIT_ULL(NPC_IPPROTO_AH);
+ else
+			req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+		break;
default:
break;
}
+
+ return 0;
}
int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
@@ -428,8 +521,9 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
struct flow_msg *pmask = &req->mask;
struct flow_msg *pkt = &req->packet;
u32 flow_type;
+ int ret;
- flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
switch (flow_type) {
/* bits not set in mask are don't care */
case ETHER_FLOW:
@@ -455,13 +549,21 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
case TCP_V4_FLOW:
case UDP_V4_FLOW:
case SCTP_V4_FLOW:
- otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
case IPV6_USER_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
case SCTP_V6_FLOW:
- otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
+ if (ret)
+ return ret;
break;
default:
return -EOPNOTSUPP;
@@ -532,9 +634,13 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
/* change to unicast only if action of default entry is not
* requested by user
*/
- if (req->op != NIX_RX_ACTION_DEFAULT)
+ if (flow->flow_spec.flow_type & FLOW_RSS) {
+ req->op = NIX_RX_ACTIONOP_RSS;
+ req->index = flow->rss_ctx_id;
+ } else {
req->op = NIX_RX_ACTIONOP_UCAST;
- req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ req->index = ethtool_get_flow_spec_ring(ring_cookie);
+ }
vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
if (vf > pci_num_vf(pfvf->pdev)) {
mutex_unlock(&pfvf->mbox.lock);
@@ -555,14 +661,16 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
-int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
+int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
- u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
bool new = false;
+ u32 ring;
int err;
+ ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
return -ENOMEM;
@@ -585,6 +693,9 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
/* struct copy */
flow->flow_spec = *fsp;
+ if (fsp->flow_type & FLOW_RSS)
+ flow->rss_ctx_id = nfc->rss_context;
+
err = otx2_add_flow_msg(pfvf, flow);
if (err) {
if (new)
@@ -647,6 +758,22 @@ int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
return 0;
}
+void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
+{
+ struct otx2_flow *flow, *tmp;
+ int err;
+
+ list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
+ if (flow->rss_ctx_id != ctx_id)
+ continue;
+ err = otx2_remove_flow(pfvf, flow->location);
+ if (err)
+ netdev_warn(pfvf->netdev,
+				    "Can't delete rule %d associated with this RSS group, err: %d\n",
+ flow->location, err);
+ }
+}
+
int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 634d60655a74..53ab1814d74b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Marvell OcteonTx2 RVU Physcial Function ethernet driver
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
*
* Copyright (C) 2020 Marvell International Ltd.
*
@@ -22,10 +22,11 @@
#include "otx2_txrx.h"
#include "otx2_struct.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#include <rvu_trace.h>
-#define DRV_NAME "octeontx2-nicpf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
+#define DRV_NAME "rvu_nicpf"
+#define DRV_STRING "Marvell RVU NIC Physical Function Driver"
/* Supported devices */
static const struct pci_device_id otx2_pf_id_table[] = {
@@ -585,9 +586,17 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
- base = readq((void __iomem *)((u64)pf->reg_base + RVU_PF_VF_BAR4_ADDR));
- hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
+ /* On CN10K platform, PF <-> VF mailbox region follows after
+ * PF <-> AF mailbox region.
+ */
+ if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
+ base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
+ MBOX_SIZE;
+ else
+ base = readq((void __iomem *)((u64)pf->reg_base +
+ RVU_PF_VF_BAR4_ADDR));
+ hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
if (!hwbase) {
err = -ENOMEM;
goto free_wq;
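For reference, here is a hypothetical helper mirroring the base-address selection in the hunk above; the parameter names (cn10k, bar_start, mbox_size, bar4_addr_reg_val) stand in for the CN10K_MBOX capability test, pci_resource_start(), MBOX_SIZE and the RVU_PF_VF_BAR4_ADDR read, whose definitions are not part of this diff. It is a sketch of the layout, not driver code.

#include <stdbool.h>
#include <stdint.h>

uint64_t pfvf_mbox_base(bool cn10k, uint64_t bar_start,
			uint64_t mbox_size, uint64_t bar4_addr_reg_val)
{
	/* CN10K: the PF <-> VF region directly follows the PF <-> AF
	 * region inside the mailbox BAR.
	 */
	if (cn10k)
		return bar_start + mbox_size;

	/* OcteonTx2: the base is whatever firmware programmed into
	 * RVU_PF_VF_BAR4_ADDR.
	 */
	return bar4_addr_reg_val;
}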
@@ -779,6 +788,9 @@ static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
case MBOX_MSG_CGX_STATS:
mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
break;
+ case MBOX_MSG_CGX_FEC_STATS:
+ mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
+ break;
default:
if (msg->rc)
dev_err(pf->dev,
@@ -1039,7 +1051,7 @@ static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
* device memory to allow unaligned accesses.
*/
hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM));
+ MBOX_SIZE);
if (!hwbase) {
dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
err = -ENOMEM;
@@ -1276,6 +1288,33 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
}
}
+static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
+{
+ int frame_size;
+ int total_size;
+ int rbuf_size;
+
+ /* The data transferred by NIX to memory consists of actual packet
+ * plus additional data which has timestamp and/or EDSA/HIGIG2
+ * headers if interface is configured in corresponding modes.
+ * NIX transfers entire data using 6 segments/buffers and writes
+ * a CQE_RX descriptor with those segment addresses. First segment
+	 * has additional data prepended to the packet. The software also
+	 * reserves a headroom of 128 bytes and sizeof(struct skb_shared_info) in
+ * each segment. Hence the total size of memory needed
+ * to receive a packet with 'mtu' is:
+ * frame size = mtu + additional data;
+ * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
+ * each receive buffer size = memory / 6;
+ */
+ frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ total_size = frame_size + (OTX2_HEAD_ROOM +
+ OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
+ rbuf_size = total_size / 6;
+
+ return ALIGN(rbuf_size, 2048);
+}
+
static int otx2_init_hw_resources(struct otx2_nic *pf)
{
struct nix_lf_free_req *free_req;
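The comment in otx2_get_rbuf_size() above boils down to simple arithmetic. Below is a worked example for mtu = 1500; the 22-byte VLAN Ethernet header, 8-byte timestamp, 128-byte headroom and a ~320-byte struct skb_shared_info rounded up to 384 by the 128-byte alignment are assumptions typical of a 64-bit build, not values taken from the driver headers.

/* Back-of-the-envelope sketch only; not part of the patch. */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int mtu = 1500;
	int frame_size = mtu + 22 + 8;			/* 1530 */
	int per_seg_overhead = 128 + 384;		/* headroom + aligned shinfo */
	int total_size = frame_size + per_seg_overhead * 6;	/* 4602 */
	int rbuf_size = total_size / 6;			/* 767 */

	printf("rbuf_size = %d\n", ALIGN_UP(rbuf_size, 2048));	/* 2048 */
	return 0;
}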
@@ -1292,9 +1331,9 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->sqpool_cnt = hw->tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- /* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
- OTX2_ETH_HLEN);
+ pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+
+ pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1487,6 +1526,14 @@ int otx2_open(struct net_device *netdev)
if (!qset->rq)
goto err_free_mem;
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
+ /* Reserve LMT lines for NPA AURA batch free */
+ pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+ /* Reserve LMT lines for NIX TX */
+ pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
+ (NIX_LMTID_BASE * LMT_LINE_SIZE));
+ }
+
err = otx2_init_hw_resources(pf);
if (err)
goto err_free_mem;
@@ -2325,6 +2372,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_netdev;
}
+ otx2_setup_dev_hw_settings(pf);
+
/* Init PF <=> AF mailbox stuff */
err = otx2_pfaf_mbox_init(pf);
if (err)
@@ -2350,7 +2399,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(pf);
+ err = cn10k_pf_lmtst_init(pf);
+ if (err)
+ goto err_detach_rsrc;
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -2405,7 +2456,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* MTU range: 64 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(pf);
err = register_netdev(netdev);
if (err) {
@@ -2435,6 +2486,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
+ if (hw->lmt_base)
+ iounmap(hw->lmt_base);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2594,6 +2647,9 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_ptp_destroy(pf);
otx2_mcam_flow_del(pf);
otx2_detach_resources(&pf->mbox);
+ if (pf->hw.lmt_base)
+ iounmap(pf->hw.lmt_base);
+
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 867f646e0802..21b811c6ee0f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -44,6 +44,8 @@
#define RVU_PF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_PF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_PF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_PF_VF_MBOX_ADDR (0xC40)
+#define RVU_PF_LMTLINE_ADDR (0xC48)
/* RVU VF registers */
#define RVU_VF_VFPF_MBOX0 (0x00000)
@@ -57,6 +59,7 @@
#define RVU_VF_MSIX_VECX_ADDR(a) (0x000 | (a) << 4)
#define RVU_VF_MSIX_VECX_CTL(a) (0x008 | (a) << 4)
#define RVU_VF_MSIX_PBAX(a) (0xF0000 | (a) << 3)
+#define RVU_VF_MBOX_REGION (0xC0000)
#define RVU_FUNC_BLKADDR_SHIFT 20
#define RVU_FUNC_BLKADDR_MASK 0x1FULL
@@ -91,6 +94,7 @@
#define NPA_LF_QINTX_INT_W1S(a) (NPA_LFBASE | 0x318 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1S(a) (NPA_LFBASE | 0x320 | (a) << 12)
#define NPA_LF_QINTX_ENA_W1C(a) (NPA_LFBASE | 0x330 | (a) << 12)
+#define NPA_LF_AURA_BATCH_FREE0 (NPA_LFBASE | 0x400)
/* NIX LF registers */
#define NIX_LFBASE (BLKTYPE_NIX << RVU_FUNC_BLKADDR_SHIFT)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
index cba59ddf71bb..1f49b3caf5d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
@@ -142,7 +142,9 @@ struct nix_rx_parse_s {
u64 vtag0_ptr : 8; /* W5 */
u64 vtag1_ptr : 8;
u64 flow_key_alg : 5;
- u64 rsvd_383_341 : 43;
+ u64 rsvd_359_341 : 19;
+ u64 color : 2;
+ u64 rsvd_383_362 : 22;
u64 rsvd_447_384; /* W6 */
};
@@ -218,7 +220,8 @@ struct nix_sqe_ext_s {
u64 vlan1_ins_tci : 16;
u64 vlan0_ins_ena : 1;
u64 vlan1_ins_ena : 1;
- u64 rsvd_127_114 : 14;
+ u64 init_color : 2;
+ u64 rsvd_127_116 : 12;
};
struct nix_sqe_sg_s {
@@ -237,7 +240,8 @@ struct nix_sqe_sg_s {
/* NIX send memory subdescriptor structure */
struct nix_sqe_mem_s {
u64 offset : 16; /* W0 */
- u64 rsvd_52_16 : 37;
+ u64 rsvd_51_16 : 36;
+ u64 per_lso_seg : 1;
u64 wmem : 1;
u64 dsz : 2;
u64 alg : 4;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index d0e25414f1a1..22ec03a618b1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -17,6 +17,7 @@
#include "otx2_struct.h"
#include "otx2_txrx.h"
#include "otx2_ptp.h"
+#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@@ -199,7 +200,8 @@ static void otx2_free_rcv_seg(struct otx2_nic *pfvf, struct nix_cqe_rx_s *cqe,
sg = (struct nix_rx_sg_s *)start;
seg_addr = &sg->seg_addr;
for (seg = 0; seg < sg->segs; seg++, seg_addr++)
- otx2_aura_freeptr(pfvf, qidx, *seg_addr & ~0x07ULL);
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx,
+ *seg_addr & ~0x07ULL);
start += sizeof(*sg);
}
}
@@ -255,12 +257,11 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
/* For now ignore all the NPC parser errors and
* pass the packets to stack.
*/
- if (cqe->sg.segs == 1)
- return false;
+ return false;
}
/* If RXALL is enabled pass on packets to stack. */
- if (cqe->sg.segs == 1 && (pfvf->netdev->features & NETIF_F_RXALL))
+ if (pfvf->netdev->features & NETIF_F_RXALL)
return false;
/* Free buffer back to pool */
@@ -275,9 +276,14 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe)
{
struct nix_rx_parse_s *parse = &cqe->parse;
+ struct nix_rx_sg_s *sg = &cqe->sg;
struct sk_buff *skb = NULL;
+ void *end, *start;
+ u64 *seg_addr;
+ u16 *seg_size;
+ int seg;
- if (unlikely(parse->errlev || parse->errcode || cqe->sg.segs > 1)) {
+ if (unlikely(parse->errlev || parse->errcode)) {
if (otx2_check_rcv_errors(pfvf, cqe, cq->cq_idx))
return;
}
@@ -286,9 +292,19 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
- cq->pool_ptrs++;
-
+ start = (void *)sg;
+ end = start + ((cqe->parse.desc_sizem1 + 1) * 16);
+ while (start < end) {
+ sg = (struct nix_rx_sg_s *)start;
+ seg_addr = &sg->seg_addr;
+ seg_size = (void *)sg;
+ for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
+ otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
+ parse);
+ cq->pool_ptrs++;
+ }
+ start += sizeof(*sg);
+ }
otx2_set_rxhash(pfvf, cqe, skb);
skb_record_rx_queue(skb, cq->cq_idx);
@@ -304,7 +320,6 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
{
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- s64 bufptr;
while (likely(processed_cqe < budget)) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
@@ -330,29 +345,23 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
if (unlikely(!cq->pool_ptrs))
return 0;
-
/* Refill pool with new buffers */
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, cq);
+
+ return processed_cqe;
+}
+
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+{
+ struct otx2_nic *pfvf = dev;
+ dma_addr_t bufptr;
+
while (cq->pool_ptrs) {
- bufptr = __otx2_alloc_rbuf(pfvf, cq->rbpool);
- if (unlikely(bufptr <= 0)) {
- struct refill_work *work;
- struct delayed_work *dwork;
-
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- }
+ if (otx2_alloc_buffer(pfvf, cq, &bufptr))
break;
- }
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
-
- return processed_cqe;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@@ -439,7 +448,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
return workdone;
}
-static void otx2_sqe_flush(struct otx2_snd_queue *sq, int size)
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx)
{
u64 status;
@@ -797,7 +807,7 @@ static void otx2_sq_append_tso(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
sqe_hdr->sizem1 = (offset / 16) - 1;
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
}
}
@@ -806,15 +816,17 @@ static bool is_hw_tso_supported(struct otx2_nic *pfvf,
{
int payload_len, last_seg_size;
- if (!pfvf->hw.hw_tso)
+ if (test_bit(HW_TSO, &pfvf->hw.cap_flag))
+ return true;
+
+ /* On 96xx A0, HW TSO not supported */
+ if (!is_96xx_B0(pfvf->pdev))
return false;
/* HW has an issue due to which when the payload of the last LSO
* segment is shorter than 16 bytes, some header fields may not
* be correctly modified, hence don't offload such TSO segments.
*/
- if (!is_96xx_B0(pfvf->pdev))
- return true;
payload_len = skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb));
last_seg_size = payload_len % skb_shinfo(skb)->gso_size;
@@ -915,7 +927,7 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
netdev_tx_sent_queue(txq, skb->len);
/* Flush SQE to HW */
- otx2_sqe_flush(sq, offset);
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
return true;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 73af15685657..52486c1f0973 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -24,7 +24,6 @@
#define OTX2_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN)
#define OTX2_MIN_MTU 64
-#define OTX2_MAX_MTU (9212 - OTX2_ETH_HLEN)
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
@@ -114,6 +113,7 @@ struct otx2_cq_poll {
struct otx2_pool {
struct qmem *stack;
struct qmem *fc_addr;
+ u64 *lmt_addr;
u16 rbsize;
};
@@ -156,4 +156,10 @@ static inline u64 otx2_iova_to_phys(void *iommu_domain, dma_addr_t dma_addr)
int otx2_napi_handler(struct napi_struct *napi, int budget);
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx);
+void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
+ int size, int qidx);
+void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index d3e4cfd244e2..085be90a03eb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -7,9 +7,10 @@
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "cn10k.h"
-#define DRV_NAME "octeontx2-nicvf"
-#define DRV_STRING "Marvell OcteonTX2 NIC Virtual Function Driver"
+#define DRV_NAME "rvu_nicvf"
+#define DRV_STRING "Marvell RVU NIC Virtual Function Driver"
static const struct pci_device_id otx2_vf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_AFVF) },
@@ -277,7 +278,7 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
vf->mbox_wq = NULL;
}
- if (mbox->mbox.hwbase)
+ if (mbox->mbox.hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
iounmap((void __iomem *)mbox->mbox.hwbase);
otx2_mbox_destroy(&mbox->mbox);
@@ -297,16 +298,25 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
if (!vf->mbox_wq)
return -ENOMEM;
- /* Mailbox is a reserved memory (in RAM) region shared between
- * admin function (i.e PF0) and this VF, shouldn't be mapped as
- * device memory to allow unaligned accesses.
- */
- hwbase = ioremap_wc(pci_resource_start(vf->pdev, PCI_MBOX_BAR_NUM),
- pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM));
- if (!hwbase) {
- dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
- err = -ENOMEM;
- goto exit;
+ if (test_bit(CN10K_MBOX, &vf->hw.cap_flag)) {
+ /* For cn10k platform, VF mailbox region is in its BAR2
+ * register space
+ */
+ hwbase = vf->reg_base + RVU_VF_MBOX_REGION;
+ } else {
+ /* Mailbox is a reserved memory (in RAM) region shared between
+ * admin function (i.e PF0) and this VF, shouldn't be mapped as
+ * device memory to allow unaligned accesses.
+ */
+ hwbase = ioremap_wc(pci_resource_start(vf->pdev,
+ PCI_MBOX_BAR_NUM),
+ pci_resource_len(vf->pdev,
+ PCI_MBOX_BAR_NUM));
+ if (!hwbase) {
+ dev_err(vf->dev, "Unable to map VFAF mailbox region\n");
+ err = -ENOMEM;
+ goto exit;
+ }
}
err = otx2_mbox_init(&mbox->mbox, hwbase, vf->pdev, vf->reg_base,
@@ -329,6 +339,8 @@ static int otx2vf_vfaf_mbox_init(struct otx2_nic *vf)
return 0;
exit:
+ if (hwbase && !test_bit(CN10K_MBOX, &vf->hw.cap_flag))
+ iounmap(hwbase);
destroy_workqueue(vf->mbox_wq);
return err;
}
@@ -525,6 +537,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_free_irq_vectors;
}
+ otx2_setup_dev_hw_settings(vf);
/* Init VF <=> PF mailbox stuff */
err = otx2vf_vfaf_mbox_init(vf);
if (err)
@@ -548,7 +561,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- otx2_setup_dev_hw_settings(vf);
+ err = cn10k_vf_lmtst_init(vf);
+ if (err)
+ goto err_detach_rsrc;
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
@@ -571,7 +586,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* MTU range: 68 - 9190 */
netdev->min_mtu = OTX2_MIN_MTU;
- netdev->max_mtu = OTX2_MAX_MTU;
+ netdev->max_mtu = otx2_get_max_mtu(vf);
INIT_WORK(&vf->reset_task, otx2vf_reset_task);
@@ -600,6 +615,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_detach_rsrc:
+ if (hw->lmt_base)
+ iounmap(hw->lmt_base);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -628,8 +645,11 @@ static void otx2vf_remove(struct pci_dev *pdev)
cancel_work_sync(&vf->reset_task);
unregister_netdev(netdev);
otx2vf_disable_mbox_intr(vf);
-
otx2_detach_resources(&vf->mbox);
+
+ if (vf->hw.lmt_base)
+ iounmap(vf->hw.lmt_base);
+
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index 7d83e1f91ef1..49e052273f30 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -580,63 +580,55 @@ int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
}
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
- unsigned long flags)
+ struct switchdev_brport_flags flags)
{
struct prestera_bridge_port *br_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
- err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_FLOOD) {
+ err = prestera_hw_port_flood_set(port, flags.val & BR_FLOOD);
+ if (err)
+ return err;
+ }
- err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
- if (err)
- return err;
+ if (flags.mask & BR_LEARNING) {
+ err = prestera_hw_port_learning_set(port,
+ flags.val & BR_LEARNING);
+ if (err)
+ return err;
+ }
- memcpy(&br_port->flags, &flags, sizeof(flags));
+ memcpy(&br_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
struct prestera_switch *sw = port->sw;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
- ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
}
static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
- struct switchdev_trans *trans,
struct net_device *dev,
bool vlan_enabled)
{
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge = prestera_bridge_by_dev(sw->swdev, dev);
if (WARN_ON(!bridge))
return -EINVAL;
@@ -665,19 +657,15 @@ static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
return 0;
}
-static int presterar_port_attr_stp_state_set(struct prestera_port *port,
- struct switchdev_trans *trans,
- struct net_device *dev,
- u8 state)
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct net_device *dev,
+ u8 state)
{
struct prestera_bridge_port *br_port;
struct prestera_bridge_vlan *br_vlan;
int err;
u16 vid;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
if (!br_port)
return 0;
@@ -713,34 +701,31 @@ err_port_stp_set:
static int prestera_port_obj_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
int err = 0;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = presterar_port_attr_stp_state_set(port, trans,
- attr->orig_dev,
- attr->u.stp_state);
+ err = prestera_port_attr_stp_state_set(port, attr->orig_dev,
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- if (attr->u.brport_flags &
+ if (attr->u.brport_flags.mask &
~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
err = -EINVAL;
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = prestera_port_attr_br_flags_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_flags_set(port, attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = prestera_port_attr_br_ageing_set(port, trans,
+ err = prestera_port_attr_br_ageing_set(port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = prestera_port_attr_br_vlan_set(port, trans,
- attr->orig_dev,
+ err = prestera_port_attr_br_vlan_set(port, attr->orig_dev,
attr->u.vlan_filtering);
break;
default:
@@ -1020,7 +1005,6 @@ prestera_bridge_port_vlan_del(struct prestera_port *port,
static int prestera_port_vlans_add(struct prestera_port *port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1029,14 +1013,10 @@ static int prestera_port_vlans_add(struct prestera_port *port,
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- u16 vid;
if (netif_is_bridge_master(dev))
return 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1045,22 +1025,13 @@ static int prestera_port_vlans_add(struct prestera_port *port,
if (!bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = prestera_bridge_port_vlan_add(port, br_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return prestera_bridge_port_vlan_add(port, br_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
static int prestera_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(dev);
@@ -1069,7 +1040,7 @@ static int prestera_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- return prestera_port_vlans_add(port, vlan, trans, extack);
+ return prestera_port_vlans_add(port, vlan, extack);
default:
return -EOPNOTSUPP;
}
@@ -1081,7 +1052,6 @@ static int prestera_port_vlans_del(struct prestera_port *port,
struct net_device *dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
- u16 vid;
if (netif_is_bridge_master(dev))
return -EOPNOTSUPP;
@@ -1093,8 +1063,7 @@ static int prestera_port_vlans_del(struct prestera_port *port,
if (!br_port->bridge->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- prestera_bridge_port_vlan_del(port, br_port, vid);
+ prestera_bridge_port_vlan_del(port, br_port, vlan->vid);
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6d2d60675ffd..01d3ee4b5829 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -353,7 +353,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
/* Setup gmac */
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
- mcr_new |= MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
/* Only update control register when needed! */
@@ -759,8 +759,8 @@ static void mtk_get_stats64(struct net_device *dev,
static inline int mtk_max_frag_size(int mtu)
{
/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
- if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
- mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
+ mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -771,7 +771,7 @@ static inline int mtk_max_buf_size(int frag_size)
int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- WARN_ON(buf_size < MTK_MAX_RX_LENGTH);
+ WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
return buf_size;
}
@@ -2499,6 +2499,35 @@ static void mtk_uninit(struct net_device *dev)
mtk_rx_irq_disable(eth, ~0);
}
+static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int length = new_mtu + MTK_RX_ETH_HLEN;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+
+ if (length <= 1518)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+ else if (length <= 1536)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+ else if (length <= 1552)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+ else
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+ }
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mtk_mac *mac = netdev_priv(dev);
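mtk_change_mtu() above buckets the frame length into one of four MAC_MCR_MAX_RX settings. A standalone sketch of that bucketing follows, assuming MTK_RX_ETH_HLEN = ETH_HLEN + ETH_FCS_LEN = 18 as redefined later in this diff; it is an illustration, not driver code.

#include <stdio.h>

static int mtk_max_rx_setting(int new_mtu)
{
	int length = new_mtu + 18;	/* assumed MTK_RX_ETH_HLEN */

	if (length <= 1518)
		return 1518;
	else if (length <= 1536)
		return 1536;
	else if (length <= 1552)
		return 1552;
	return 2048;
}

int main(void)
{
	int mtu[] = { 1500, 1518, 1534, 2030 };
	int i;

	for (i = 0; i < 4; i++)
		printf("mtu %d -> MAC_MCR max RX %d\n",
		       mtu[i], mtk_max_rx_setting(mtu[i]));
	return 0;
}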
@@ -2795,6 +2824,7 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_set_mac_address = mtk_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mtk_do_ioctl,
+ .ndo_change_mtu = mtk_change_mtu,
.ndo_tx_timeout = mtk_tx_timeout,
.ndo_get_stats64 = mtk_get_stats64,
.ndo_fix_features = mtk_fix_features,
@@ -2896,7 +2926,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
- eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ else
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 454cfcd465fd..fd3cec8f06ba 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -17,12 +17,13 @@
#include <linux/phylink.h>
#define MTK_QDMA_PAGE_SIZE 2048
-#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH_2K 2048
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_DMA_SIZE 256
#define MTK_NAPI_WEIGHT 64
#define MTK_MAC_COUNT 2
-#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
@@ -320,7 +321,12 @@
/* Mac control registers */
#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
-#define MAC_MCR_MAX_RX_1536 BIT(24)
+#define MAC_MCR_MAX_RX_MASK GENMASK(25, 24)
+#define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24))
+#define MAC_MCR_MAX_RX_1518 0x0
+#define MAC_MCR_MAX_RX_1536 0x1
+#define MAC_MCR_MAX_RX_1552 0x2
+#define MAC_MCR_MAX_RX_2048 0x3
#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
#define MAC_MCR_FORCE_MODE BIT(15)
#define MAC_MCR_TX_EN BIT(14)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 32aad4d32b88..51b9700fce83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2839,8 +2839,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
@@ -2873,8 +2871,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx4_en_features_check,
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
.ndo_bpf = mlx4_xdp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c1c9118a66c9..e35e4d7ef4d1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -682,8 +682,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
rcu_read_lock();
xdp_prog = rcu_dereference(ring->xdp_prog);
- xdp.rxq = &ring->xdp_rxq;
- xdp.frame_sz = priv->frag_info[0].frag_stride;
+ xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
@@ -777,10 +776,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp.data_hard_start = va - frags[0].page_offset;
- xdp.data = va;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + length;
+ xdp_prepare_buff(&xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, false);
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6e4d7bb7fea2..9d623e38d783 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -149,14 +149,14 @@ config MLX5_IPSEC
IPsec support for the Connect-X family.
config MLX5_EN_IPSEC
- bool "IPSec XFRM cryptography-offload accelaration"
+ bool "IPSec XFRM cryptography-offload acceleration"
depends on MLX5_CORE_EN
depends on XFRM_OFFLOAD
depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
depends on MLX5_FPGA_IPSEC || MLX5_IPSEC
default n
help
- Build support for IPsec cryptography-offload accelaration in the NIC.
+ Build support for IPsec cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
@@ -166,7 +166,6 @@ config MLX5_FPGA_TLS
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
depends on MLX5_FPGA
- depends on XPS
select MLX5_EN_TLS
default n
help
@@ -181,7 +180,6 @@ config MLX5_TLS
depends on TLS_DEVICE
depends on TLS=y || MLX5_CORE=m
depends on MLX5_CORE_EN
- depends on XPS
select MLX5_ACCEL
select MLX5_EN_TLS
default n
@@ -192,7 +190,7 @@ config MLX5_TLS
config MLX5_EN_TLS
bool
help
- Build support for TLS cryptography-offload accelaration in the NIC.
+ Build support for TLS cryptography-offload acceleration in the NIC.
Note: Support for hardware with this capability needs to be selected
for this option to become available.
@@ -203,3 +201,22 @@ config MLX5_SW_STEERING
default y
help
Build support for software-managed steering in the NIC.
+
+config MLX5_SF
+ bool "Mellanox Technologies subfunction device support using auxiliary device"
+ depends on MLX5_CORE && MLX5_CORE_EN
+ default n
+ help
+ Build support for subfunction devices in the NIC. A Mellanox subfunction
+ device can support RDMA, netdevice and vdpa devices.
+ It is similar to an SR-IOV VF but it doesn't require SR-IOV support.
+
+config MLX5_SF_MANAGER
+ bool
+ depends on MLX5_SF && MLX5_ESWITCH
+ default y
+ help
+ Build support for subfunction ports in the NIC. A Mellanox subfunction
+ port is managed through devlink. A subfunction supports RDMA, netdevice
+ and vdpa devices. It is similar to an SR-IOV VF but it doesn't require
+ SR-IOV support.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 77961643d5a9..8cb2625472c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,8 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o fw_reset.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
+ fw_reset.o qos.o
#
# Netdev basic
@@ -25,7 +26,8 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
- en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o
+ en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
+ en/qos.o en/trap.o
#
# Netdev extra
@@ -38,6 +40,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/mapping.o lib/fs_chains.o en/tc_tun.o \
+ esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
@@ -83,5 +86,15 @@ mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o
steering/dr_matcher.o steering/dr_rule.o \
steering/dr_icm_pool.o steering/dr_buddy.o \
steering/dr_ste.o steering/dr_send.o \
+ steering/dr_ste_v0.o steering/dr_ste_v1.o \
steering/dr_cmd.o steering/dr_fw.o \
steering/dr_action.o steering/fs_dr.o
+#
+# SF device
+#
+mlx5_core-$(CONFIG_MLX5_SF) += sf/vhca_event.o sf/dev/dev.o sf/dev/driver.o
+
+#
+# SF manager
+#
+mlx5_core-$(CONFIG_MLX5_SF_MANAGER) += sf/cmd.o sf/hw_table.o sf/devlink.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 50c7b9ee80c3..e8cecd50558d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -333,6 +333,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DEALLOC_MEMIC:
case MLX5_CMD_OP_PAGE_FAULT_RESUME:
case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS:
+ case MLX5_CMD_OP_DEALLOC_SF:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -464,6 +465,9 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_ALLOC_MEMIC:
case MLX5_CMD_OP_MODIFY_XRQ:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
+ case MLX5_CMD_OP_QUERY_VHCA_STATE:
+ case MLX5_CMD_OP_MODIFY_VHCA_STATE:
+ case MLX5_CMD_OP_ALLOC_SF:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -657,6 +661,10 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(ALLOC_SF);
+ MLX5_COMMAND_STR_CASE(DEALLOC_SF);
default: return "unknown command opcode";
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 3261d0dc1104..d7d8a68ef23d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,6 +7,8 @@
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
+#include "sf/dev/dev.h"
+#include "sf/sf.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
struct devlink_flash_update_params *params,
@@ -127,6 +129,22 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ bool sf_dev_allocated;
+
+ sf_dev_allocated = mlx5_sf_dev_allocated(dev);
+ if (sf_dev_allocated) {
+ /* Reload results in deleting the SF device, which further results in
+ * unregistering the devlink instance while holding devlink_mutex.
+ * Hence, reload is not supported.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated");
+ return -EOPNOTSUPP;
+ }
+
+ if (mlx5_lag_is_active(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+ return -EOPNOTSUPP;
+ }
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
@@ -168,6 +186,91 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
return 0;
}
+static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
+{
+ struct mlx5_devlink_trap *dl_trap;
+
+ list_for_each_entry(dl_trap, &dev->priv.traps, list)
+ if (dl_trap->trap.id == trap_id)
+ return dl_trap;
+
+ return NULL;
+}
+
+static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL);
+ if (!dl_trap)
+ return -ENOMEM;
+
+ dl_trap->trap.id = trap->id;
+ dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP;
+ dl_trap->item = trap_ctx;
+
+ if (mlx5_find_trap_by_id(dev, trap->id)) {
+ kfree(dl_trap);
+ mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id);
+ return -EEXIST;
+ }
+
+ list_add_tail(&dl_trap->list, &dev->priv.traps);
+ return 0;
+}
+
+static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id);
+ return;
+ }
+ list_del(&dl_trap->list);
+ kfree(dl_trap);
+}
+
+static int mlx5_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum devlink_trap_action action_orig;
+ struct mlx5_devlink_trap *dl_trap;
+ int err = 0;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap->id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (action == dl_trap->trap.action)
+ goto out;
+
+ action_orig = dl_trap->trap.action;
+ dl_trap->trap.action = action;
+ err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
+ &dl_trap->trap);
+ if (err)
+ dl_trap->trap.action = action_orig;
+out:
+ return err;
+}
+
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
@@ -179,6 +282,12 @@ static const struct devlink_ops mlx5_devlink_ops = {
.port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
.port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
#endif
+#ifdef CONFIG_MLX5_SF_MANAGER
+ .port_new = mlx5_devlink_sf_port_new,
+ .port_del = mlx5_devlink_sf_port_del,
+ .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
+ .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
+#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
@@ -186,8 +295,59 @@ static const struct devlink_ops mlx5_devlink_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = mlx5_devlink_reload_down,
.reload_up = mlx5_devlink_reload_up,
+ .trap_init = mlx5_devlink_trap_init,
+ .trap_fini = mlx5_devlink_trap_fini,
+ .trap_action_set = mlx5_devlink_trap_action_set,
};
+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+ struct devlink_port *dl_port)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
+ return;
+ }
+
+ if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
+ mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
+ dl_trap->trap.action);
+ return;
+ }
+ devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
+}
+
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
+{
+ struct mlx5_devlink_trap *dl_trap;
+ int count = 0;
+
+ list_for_each_entry(dl_trap, &dev->priv.traps, list)
+ if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
+ count++;
+
+ return count;
+}
+
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+ enum devlink_trap_action *action)
+{
+ struct mlx5_devlink_trap *dl_trap;
+
+ dl_trap = mlx5_find_trap_by_id(dev, trap_id);
+ if (!dl_trap) {
+ mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
+ trap_id);
+ return -EINVAL;
+ }
+
+ *action = dl_trap->trap.action;
+ return 0;
+}
+
struct devlink *mlx5_devlink_alloc(void)
{
return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev));
@@ -273,6 +433,10 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
+ if (mlx5_core_is_mp_slave(dev) || mlx5_lag_is_active(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Multi port slave/Lag device can't configure RoCE");
+ return -EOPNOTSUPP;
+ }
return 0;
}
@@ -358,6 +522,49 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
#endif
}
+#define MLX5_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)
+
+static const struct devlink_trap mlx5_traps_arr[] = {
+ MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
+};
+
+static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+};
+
+static int mlx5_devlink_traps_register(struct devlink *devlink)
+{
+ struct mlx5_core_dev *core_dev = devlink_priv(devlink);
+ int err;
+
+ err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+ if (err)
+ return err;
+
+ err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
+ &core_dev->priv);
+ if (err)
+ goto err_trap_group;
+ return 0;
+
+err_trap_group:
+ devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+ return err;
+}
+
+static void mlx5_devlink_traps_unregister(struct devlink *devlink)
+{
+ devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
+ devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
+ ARRAY_SIZE(mlx5_trap_groups_arr));
+}
+
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
{
int err;
@@ -372,8 +579,16 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
goto params_reg_err;
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
+
+ err = mlx5_devlink_traps_register(devlink);
+ if (err)
+ goto traps_reg_err;
+
return 0;
+traps_reg_err:
+ devlink_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
params_reg_err:
devlink_unregister(devlink);
return err;
@@ -381,6 +596,7 @@ params_reg_err:
void mlx5_devlink_unregister(struct devlink *devlink)
{
+ mlx5_devlink_traps_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
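To summarize the devlink additions above: the two generic L2 drop traps are registered alongside the devlink instance, trap_action_set stores the new action and broadcasts it over the blocking notifier chain so the data path can install or remove the matching steering rules, and a trapped packet only reaches devlink_trap_report() while the action is "trap". A minimal sketch of the reporting side, with an illustrative wrapper name (the real call sites are in the en_rx/trap code, not shown in this section):

	/* Sketch: handing a trapped skb to devlink from an RX handler. */
	static void my_report_vlan_drop(struct mlx5_core_dev *mdev,
					struct sk_buff *skb,
					struct devlink_port *dl_port)
	{
		/* Forwarded to devlink only when the trap action is "trap". */
		mlx5_devlink_trap_report(mdev, DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
					 skb, dl_port);
	}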
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index f0de327a59be..eff107dad922 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -12,6 +12,24 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
};
+struct mlx5_trap_ctx {
+ int id;
+ int action;
+};
+
+struct mlx5_devlink_trap {
+ struct mlx5_trap_ctx trap;
+ void *item;
+ struct list_head list;
+};
+
+struct mlx5_core_dev;
+void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
+ struct devlink_port *dl_port);
+int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
+int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
+ enum devlink_trap_action *action);
+
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
index 1177860a2ee4..f15718db5d0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -15,7 +15,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, const u8 *ha,
bool neigh_connected),
TP_ARGS(nhe, ha, neigh_connected),
- TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ TP_STRUCT__entry(__string(devname, nhe->neigh_dev->name)
__array(u8, ha, ETH_ALEN)
__array(u8, v4, 4)
__array(u8, v6, 16)
@@ -25,7 +25,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, mn->dev->name);
+ __assign_str(devname, nhe->neigh_dev->name);
__entry->neigh_connected = neigh_connected;
memcpy(__entry->ha, ha, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
index d4e6cfaaade3..ac52ef37f38a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -77,7 +77,7 @@ TRACE_EVENT(mlx5e_stats_flower,
TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
TP_PROTO(const struct mlx5e_neigh_hash_entry *nhe, bool neigh_used),
TP_ARGS(nhe, neigh_used),
- TP_STRUCT__entry(__string(devname, nhe->m_neigh.dev->name)
+ TP_STRUCT__entry(__string(devname, nhe->neigh_dev->name)
__array(u8, v4, 4)
__array(u8, v6, 16)
__field(bool, neigh_used)
@@ -86,7 +86,7 @@ TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, mn->dev->name);
+ __assign_str(devname, nhe->neigh_dev->name);
__entry->neigh_used = neigh_used;
p32 = (__be32 *)__entry->v4;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 055baf3b6cb1..7435fe6829b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -55,7 +55,9 @@
#include "en_stats.h"
#include "en/dcbnl.h"
#include "en/fs.h"
+#include "en/qos.h"
#include "lib/hv_vhca.h"
+#include "lib/clock.h"
extern const struct net_device_ops mlx5e_netdev_ops;
struct page_pool;
@@ -136,10 +138,10 @@ struct page_pool;
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
-#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
+#define MLX5E_LOG_INDIR_RQT_SIZE 0x8
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MIN_NUM_CHANNELS 0x1
-#define MLX5E_MAX_NUM_CHANNELS MLX5E_INDIR_RQT_SIZE
+#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_TX_XSK_POLL_BUDGET 64
@@ -161,6 +163,9 @@ do { \
##__VA_ARGS__); \
} while (0)
+#define mlx5e_state_dereference(priv, p) \
+ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
+
enum mlx5e_rq_group {
MLX5E_RQ_GROUP_REGULAR,
MLX5E_RQ_GROUP_XSK,
@@ -389,6 +394,7 @@ struct mlx5e_txqsq {
u32 rate_limit;
struct work_struct recover_work;
struct mlx5e_ptpsq *ptpsq;
+ cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
struct mlx5e_dma_info {
@@ -560,6 +566,7 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
+void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT,
@@ -650,6 +657,7 @@ struct mlx5e_rq {
/* XDP read-mostly */
struct xdp_rxq_info xdp_rxq;
+ cqe_ts_to_ns ptp_cyc2time;
} ____cacheline_aligned_in_smp;
enum mlx5e_channel_state {
@@ -663,11 +671,13 @@ struct mlx5e_channel {
struct mlx5e_xdpsq rq_xdpsq;
struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
struct mlx5e_icosq icosq; /* internal control operations */
+ struct mlx5e_txqsq __rcu * __rcu *qos_sqs;
bool xdp;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
__be32 mkey_be;
+ u16 qos_sqs_size;
u8 num_tc;
u8 lag_port;
@@ -756,6 +766,8 @@ struct mlx5e_modify_sq_param {
int next_state;
int rl_update;
int rl_index;
+ bool qos_update;
+ u16 qos_queue_group_id;
};
#if IS_ENABLED(CONFIG_PCI_HYPERV_INTERFACE)
@@ -788,10 +800,22 @@ struct mlx5e_scratchpad {
cpumask_var_t cpumask;
};
+struct mlx5e_htb {
+ DECLARE_HASHTABLE(qos_tc2node, order_base_2(MLX5E_QOS_MAX_LEAF_NODES));
+ DECLARE_BITMAP(qos_used_qids, MLX5E_QOS_MAX_LEAF_NODES);
+ struct mlx5e_sq_stats **qos_sq_stats;
+ u16 max_qos_sqs;
+ u16 maj_id;
+ u16 defcls;
+};
+
+struct mlx5e_trap;
+
struct mlx5e_priv {
/* priv data path fields - start */
/* +1 for port ptp ts */
- struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
+ struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC +
+ MLX5E_QOS_MAX_LEAF_NODES];
int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -826,8 +850,10 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
+ struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ struct mlx5e_channel_stats trap_stats;
struct mlx5e_port_ptp_stats port_ptp_stats;
u16 max_nch;
u8 max_opened_tc;
@@ -836,6 +862,7 @@ struct mlx5e_priv {
u16 q_counter;
u16 drop_rq_q_counter;
struct notifier_block events_nb;
+ struct notifier_block blocking_events_nb;
int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
@@ -859,6 +886,7 @@ struct mlx5e_priv {
struct mlx5e_hv_vhca_stats_agent stats_agent;
#endif
struct mlx5e_scratchpad scratchpad;
+ struct mlx5e_htb htb;
};
struct mlx5e_rx_handlers {
@@ -870,8 +898,7 @@ extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
struct mlx5e_profile {
int (*init)(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile, void *ppriv);
+ struct net_device *netdev);
void (*cleanup)(struct mlx5e_priv *priv);
int (*init_rx)(struct mlx5e_priv *priv);
void (*cleanup_rx)(struct mlx5e_priv *priv);
@@ -942,6 +969,8 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
+void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
@@ -986,6 +1015,7 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_preactivate preactivate,
void *context);
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
@@ -1010,6 +1040,9 @@ void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+ struct mlx5e_params *params, struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
@@ -1020,8 +1053,10 @@ struct mlx5e_create_sq_param;
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
+ u16 qos_queue_group_id,
u32 *sqn);
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
@@ -1047,6 +1082,8 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
+void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
@@ -1120,24 +1157,25 @@ int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
-int mlx5e_netdev_init(struct net_device *netdev,
- struct mlx5e_priv *priv,
- struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
-void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv);
-struct net_device*
-mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
- int nch, void *ppriv);
+static inline unsigned int
+mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
+{
+ return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
+}
+
+int mlx5e_priv_init(struct mlx5e_priv *priv,
+ struct net_device *netdev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_priv_cleanup(struct mlx5e_priv *priv);
+struct net_device *
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs);
int mlx5e_attach_netdev(struct mlx5e_priv *priv);
void mlx5e_detach_netdev(struct mlx5e_priv *priv);
void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
+int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *new_profile, void *new_ppriv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
-void mlx5e_build_nic_params(struct mlx5e_priv *priv,
- struct mlx5e_xsk *xsk,
- struct mlx5e_rss_params *rss_params,
- struct mlx5e_params *params,
- u16 mtu);
+void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
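Worth noting in the en.h hunks above: mlx5e_state_dereference() wraps rcu_dereference_protected() with a lockdep check on priv->state_lock, and the new per-channel qos_sqs array is a doubly RCU-annotated pointer. A minimal sketch of the access pattern from a control path that holds the state lock (the function name is illustrative; the same pattern appears in en/qos.c below):

	static struct mlx5e_txqsq *my_get_qos_sq(struct mlx5e_priv *priv,
						 struct mlx5e_channel *c, int qid)
	{
		struct mlx5e_txqsq __rcu **qos_sqs;

		/* Both loads are protected by priv->state_lock, not by the RCU read side. */
		qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
		return mlx5e_state_dereference(priv, qos_sqs[qid]);
	}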
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 5749557749b0..a16297e7e2ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -44,6 +44,11 @@ struct mlx5e_l2_rule {
#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+struct mlx5e_promisc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_handle *rule;
+};
+
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
@@ -53,6 +58,7 @@ struct mlx5e_vlan_table {
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
+ struct mlx5_flow_handle *trap_rule;
bool cvlan_filter_disabled;
};
@@ -62,7 +68,7 @@ struct mlx5e_l2_table {
struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
struct mlx5e_l2_rule broadcast;
struct mlx5e_l2_rule allmulti;
- struct mlx5e_l2_rule promisc;
+ struct mlx5_flow_handle *trap_rule;
bool broadcast_enabled;
bool allmulti_enabled;
bool promisc_enabled;
@@ -126,7 +132,8 @@ struct mlx5e_ttc_table {
/* NIC prio FTS */
enum {
- MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_PROMISC_FT_LEVEL,
+ MLX5E_VLAN_FT_LEVEL,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
@@ -241,6 +248,7 @@ struct mlx5e_flow_steering {
struct mlx5e_ethtool_steering ethtool;
#endif
struct mlx5e_tc_table tc;
+ struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
@@ -288,6 +296,10 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
+int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
+int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
+void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
#endif /* __MLX5E_FLOW_STEER_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 43271a3856ca..36381a2ed5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -179,7 +179,7 @@ int mlx5e_validate_params(struct mlx5e_priv *priv, struct mlx5e_params *params)
stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
if (stop_room >= sq_size) {
- netdev_err(priv->netdev, "Stop room %hu is bigger than the SQ size %zu\n",
+ netdev_err(priv->netdev, "Stop room %u is bigger than the SQ size %zu\n",
stop_room, sq_size);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 807147d97a0f..ea2cfb04b31a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -118,6 +118,8 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
struct mlx5e_rq_param *param);
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
struct mlx5e_sq_param *param);
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param);
void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 2a2bac30daaa..d57b6f06382f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -3,7 +3,6 @@
#include "en/ptp.h"
#include "en/txrx.h"
-#include "lib/clock.h"
struct mlx5e_skb_cb_hwtstamp {
ktime_t cqe_hwtstamp;
@@ -70,6 +69,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
int budget)
{
struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
+ struct mlx5e_txqsq *sq = &ptpsq->txqsq;
ktime_t hwtstamp;
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
@@ -77,7 +77,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
goto out;
}
- hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe));
+ hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
hwtstamp, ptpsq->cq_stats);
ptpsq->cq_stats->cqe++;
@@ -183,6 +183,9 @@ static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq->stop_room = param->stop_room;
+ sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
+ mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
node = dev_to_node(mlx5_core_dma_dev(mdev));
@@ -261,7 +264,7 @@ static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
csp.min_inline_mode = txqsq->min_inline_mode;
csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
- err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, 0, &txqsq->sqn);
if (err)
goto err_free_txqsq;
@@ -428,16 +431,13 @@ static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
if (err)
return err;
- napi_enable(&c->napi);
-
err = mlx5e_ptp_open_txqsqs(c, cparams);
if (err)
- goto disable_napi;
+ goto close_cqs;
return 0;
-disable_napi:
- napi_disable(&c->napi);
+close_cqs:
mlx5e_ptp_close_cqs(c);
return err;
@@ -446,7 +446,6 @@ disable_napi:
static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
{
mlx5e_ptp_close_txqsqs(c);
- napi_disable(&c->napi);
mlx5e_ptp_close_cqs(c);
}
@@ -515,6 +514,8 @@ void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
{
int tc;
+ napi_enable(&c->napi);
+
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
}
@@ -525,4 +526,6 @@ void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
+
+ napi_disable(&c->napi);
}
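Two things happen in the ptp.c hunks above: NAPI enable/disable moves from open/close to activate/deactivate so it matches the regular channel lifecycle, and the TX timestamp conversion goes through the new per-SQ ptp_cyc2time callback, chosen once at allocation time depending on whether the device runs a real-time clock. A short recap of that selection, assuming mdev, sq and cqe are in scope (the helpers come from lib/clock.h):

	cqe_ts_to_ns cyc2time = mlx5_is_real_time_sq(mdev) ?
				mlx5_real_time_cyc2time :
				mlx5_timecounter_cyc2time;
	ktime_t hwtstamp = mlx5e_cqe_ts_to_ns(cyc2time, sq->clock, get_cqe_ts(cqe));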
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
new file mode 100644
index 000000000000..12d7ad061237
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -0,0 +1,984 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "en.h"
+#include "params.h"
+#include "../qos.h"
+
+#define BYTES_IN_MBIT 125000
+
+int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
+{
+ return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
+}
+
+int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv)
+{
+ int last = find_last_bit(priv->htb.qos_used_qids, mlx5e_qos_max_leaf_nodes(priv->mdev));
+
+ return last == mlx5e_qos_max_leaf_nodes(priv->mdev) ? 0 : last + 1;
+}
+
+/* Software representation of the QoS tree (internal to this file) */
+
+static int mlx5e_find_unused_qos_qid(struct mlx5e_priv *priv)
+{
+ int size = mlx5e_qos_max_leaf_nodes(priv->mdev);
+ int res;
+
+ WARN_ONCE(!mutex_is_locked(&priv->state_lock), "%s: state_lock is not held\n", __func__);
+ res = find_first_zero_bit(priv->htb.qos_used_qids, size);
+
+ return res == size ? -ENOSPC : res;
+}
+
+struct mlx5e_qos_node {
+ struct hlist_node hnode;
+ struct rcu_head rcu;
+ struct mlx5e_qos_node *parent;
+ u64 rate;
+ u32 bw_share;
+ u32 max_average_bw;
+ u32 hw_id;
+ u32 classid; /* 16-bit, except root. */
+ u16 qid;
+};
+
+#define MLX5E_QOS_QID_INNER 0xffff
+#define MLX5E_HTB_CLASSID_ROOT 0xffffffff
+
+static struct mlx5e_qos_node *
+mlx5e_sw_node_create_leaf(struct mlx5e_priv *priv, u16 classid, u16 qid,
+ struct mlx5e_qos_node *parent)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->parent = parent;
+
+ node->qid = qid;
+ __set_bit(qid, priv->htb.qos_used_qids);
+
+ node->classid = classid;
+ hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, classid);
+
+ mlx5e_update_tx_netdev_queues(priv);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_create_root(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *node;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->qid = MLX5E_QOS_QID_INNER;
+ node->classid = MLX5E_HTB_CLASSID_ROOT;
+ hash_add_rcu(priv->htb.qos_tc2node, &node->hnode, node->classid);
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find(struct mlx5e_priv *priv, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible(priv->htb.qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_rcu(struct mlx5e_priv *priv, u32 classid)
+{
+ struct mlx5e_qos_node *node = NULL;
+
+ hash_for_each_possible_rcu(priv->htb.qos_tc2node, node, hnode, classid) {
+ if (node->classid == classid)
+ break;
+ }
+
+ return node;
+}
+
+static void mlx5e_sw_node_delete(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+ hash_del_rcu(&node->hnode);
+ if (node->qid != MLX5E_QOS_QID_INNER) {
+ __clear_bit(node->qid, priv->htb.qos_used_qids);
+ mlx5e_update_tx_netdev_queues(priv);
+ }
+ kfree_rcu(node, rcu);
+}
+
+/* TX datapath API */
+
+static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid)
+{
+ /* These channel params are safe to access from the datapath, because:
+ * 1. This function is called only after checking priv->htb.maj_id != 0,
+ * and the number of queues can't change while HTB offload is active.
+ * 2. When priv->htb.maj_id becomes 0, synchronize_rcu waits for
+ * mlx5e_select_queue to finish while holding priv->state_lock,
+ * preventing other code from changing the number of queues.
+ */
+ bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS);
+
+ return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid;
+}
+
+int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid)
+{
+ struct mlx5e_qos_node *node;
+ u16 qid;
+ int res;
+
+ rcu_read_lock();
+
+ node = mlx5e_sw_node_find_rcu(priv, classid);
+ if (!node) {
+ res = -ENOENT;
+ goto out;
+ }
+ qid = READ_ONCE(node->qid);
+ if (qid == MLX5E_QOS_QID_INNER) {
+ res = -EINVAL;
+ goto out;
+ }
+ res = mlx5e_qid_from_qos(&priv->channels, qid);
+
+out:
+ rcu_read_unlock();
+ return res;
+}
+
+static struct mlx5e_txqsq *mlx5e_get_qos_sq(struct mlx5e_priv *priv, int qid)
+{
+ struct mlx5e_params *params = &priv->channels.params;
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_channel *c;
+ int ix;
+
+ ix = qid % params->num_channels;
+ qid /= params->num_channels;
+ c = priv->channels.c[ix];
+
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ return mlx5e_state_dereference(priv, qos_sqs[qid]);
+}
+
+/* SQ lifecycle */
+
+static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs,
+ struct mlx5e_qos_node *node)
+{
+ struct mlx5e_create_cq_param ccp = {};
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_sq_param param_sq;
+ struct mlx5e_cq_param param_cq;
+ int txq_ix, ix, qid, err = 0;
+ struct mlx5e_params *params;
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+
+ params = &chs->params;
+
+ txq_ix = mlx5e_qid_from_qos(chs, node->qid);
+
+ WARN_ON(node->qid > priv->htb.max_qos_sqs);
+ if (node->qid == priv->htb.max_qos_sqs) {
+ struct mlx5e_sq_stats *stats, **stats_list = NULL;
+
+ if (priv->htb.max_qos_sqs == 0) {
+ stats_list = kvcalloc(mlx5e_qos_max_leaf_nodes(priv->mdev),
+ sizeof(*stats_list),
+ GFP_KERNEL);
+ if (!stats_list)
+ return -ENOMEM;
+ }
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats) {
+ kvfree(stats_list);
+ return -ENOMEM;
+ }
+ if (stats_list)
+ WRITE_ONCE(priv->htb.qos_sq_stats, stats_list);
+ WRITE_ONCE(priv->htb.qos_sq_stats[node->qid], stats);
+ /* Order max_qos_sqs increment after writing the array pointer.
+ * Pairs with smp_load_acquire in en_stats.c.
+ */
+ smp_store_release(&priv->htb.max_qos_sqs, priv->htb.max_qos_sqs + 1);
+ }
+
+ ix = node->qid % params->num_channels;
+ qid = node->qid / params->num_channels;
+ c = chs->c[ix];
+
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+
+ if (!sq)
+ return -ENOMEM;
+
+ mlx5e_build_create_cq_param(&ccp, c);
+
+ memset(&param_sq, 0, sizeof(param_sq));
+ memset(&param_cq, 0, sizeof(param_cq));
+ mlx5e_build_sq_param(priv, params, &param_sq);
+ mlx5e_build_tx_cq_param(priv, params, &param_cq);
+ err = mlx5e_open_cq(priv, params->tx_cq_moderation, &param_cq, &ccp, &sq->cq);
+ if (err)
+ goto err_free_sq;
+ err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
+ &param_sq, sq, 0, node->hw_id, node->qid);
+ if (err)
+ goto err_close_cq;
+
+ rcu_assign_pointer(qos_sqs[qid], sq);
+
+ return 0;
+
+err_close_cq:
+ mlx5e_close_cq(&sq->cq);
+err_free_sq:
+ kfree(sq);
+ return err;
+}
+
+static void mlx5e_activate_qos_sq(struct mlx5e_priv *priv, struct mlx5e_qos_node *node)
+{
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_get_qos_sq(priv, node->qid);
+
+ WRITE_ONCE(priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, node->qid)], sq);
+
+ /* Make the change to txq2sq visible before the queue is started.
+ * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
+ * which pairs with this barrier.
+ */
+ smp_wmb();
+
+ qos_dbg(priv->mdev, "Activate QoS SQ qid %u\n", node->qid);
+ mlx5e_activate_txqsq(sq);
+}
+
+static void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_get_qos_sq(priv, qid);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ return;
+
+ qos_dbg(priv->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ mlx5e_deactivate_txqsq(sq);
+
+ /* The queue is disabled, no synchronization with datapath is needed. */
+ priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+}
+
+static void mlx5e_close_qos_sq(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ struct mlx5e_params *params;
+ struct mlx5e_channel *c;
+ struct mlx5e_txqsq *sq;
+ int ix;
+
+ params = &priv->channels.params;
+
+ ix = qid % params->num_channels;
+ qid /= params->num_channels;
+ c = priv->channels.c[ix];
+ qos_sqs = mlx5e_state_dereference(priv, c->qos_sqs);
+ sq = rcu_replace_pointer(qos_sqs[qid], NULL, lockdep_is_held(&priv->state_lock));
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ return;
+
+ synchronize_rcu(); /* Sync with NAPI. */
+
+ mlx5e_close_txqsq(sq);
+ mlx5e_close_cq(&sq->cq);
+ kfree(sq);
+}
+
+void mlx5e_qos_close_queues(struct mlx5e_channel *c)
+{
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ int i;
+
+ qos_sqs = rcu_replace_pointer(c->qos_sqs, NULL, lockdep_is_held(&c->priv->state_lock));
+ if (!qos_sqs)
+ return;
+ synchronize_rcu(); /* Sync with NAPI. */
+
+ for (i = 0; i < c->qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ continue;
+
+ mlx5e_close_txqsq(sq);
+ mlx5e_close_cq(&sq->cq);
+ kfree(sq);
+ }
+
+ kvfree(qos_sqs);
+}
+
+static void mlx5e_qos_close_all_queues(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_qos_close_queues(chs->c[i]);
+}
+
+static int mlx5e_qos_alloc_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ u16 qos_sqs_size;
+ int i;
+
+ qos_sqs_size = DIV_ROUND_UP(mlx5e_qos_max_leaf_nodes(priv->mdev), chs->num);
+
+ for (i = 0; i < chs->num; i++) {
+ struct mlx5e_txqsq **sqs;
+
+ sqs = kvcalloc(qos_sqs_size, sizeof(struct mlx5e_txqsq *), GFP_KERNEL);
+ if (!sqs)
+ goto err_free;
+
+ WRITE_ONCE(chs->c[i]->qos_sqs_size, qos_sqs_size);
+ smp_wmb(); /* Pairs with mlx5e_napi_poll. */
+ rcu_assign_pointer(chs->c[i]->qos_sqs, sqs);
+ }
+
+ return 0;
+
+err_free:
+ while (--i >= 0) {
+ struct mlx5e_txqsq **sqs;
+
+ sqs = rcu_replace_pointer(chs->c[i]->qos_sqs, NULL,
+ lockdep_is_held(&priv->state_lock));
+
+ synchronize_rcu(); /* Sync with NAPI. */
+ kvfree(sqs);
+ }
+ return -ENOMEM;
+}
+
+int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt, err;
+
+ if (!priv->htb.maj_id)
+ return 0;
+
+ err = mlx5e_qos_alloc_queues(priv, chs);
+ if (err)
+ return err;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ err = mlx5e_open_qos_sq(priv, chs, node);
+ if (err) {
+ mlx5e_qos_close_all_queues(chs);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+void mlx5e_qos_activate_queues(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode) {
+ if (node->qid == MLX5E_QOS_QID_INNER)
+ continue;
+ mlx5e_activate_qos_sq(priv, node);
+ }
+}
+
+void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
+{
+ struct mlx5e_params *params = &c->priv->channels.params;
+ struct mlx5e_txqsq __rcu **qos_sqs;
+ int i;
+
+ qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
+ if (!qos_sqs)
+ return;
+
+ for (i = 0; i < c->qos_sqs_size; i++) {
+ u16 qid = params->num_channels * i + c->ix;
+ struct mlx5e_txqsq *sq;
+
+ sq = mlx5e_state_dereference(c->priv, qos_sqs[i]);
+ if (!sq) /* Handle the case when the SQ failed to open. */
+ continue;
+
+ qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
+ mlx5e_deactivate_txqsq(sq);
+
+ /* The queue is disabled, no synchronization with datapath is needed. */
+ c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+ }
+}
+
+static void mlx5e_qos_deactivate_all_queues(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_qos_deactivate_queues(chs->c[i]);
+}
+
+/* HTB API */
+
+int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *root;
+ bool opened;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_CREATE handle %04x:, default :%04x\n", htb_maj_id, htb_defcls);
+
+ if (!mlx5_qos_is_supported(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
+ }
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ err = mlx5e_qos_alloc_queues(priv, &priv->channels);
+ if (err)
+ return err;
+ }
+
+ root = mlx5e_sw_node_create_root(priv);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto err_free_queues;
+ }
+
+ err = mlx5_qos_create_root_node(priv->mdev, &root->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error. Try upgrading firmware.");
+ goto err_sw_node_delete;
+ }
+
+ WRITE_ONCE(priv->htb.defcls, htb_defcls);
+ /* Order maj_id after defcls - pairs with
+ * mlx5e_select_queue/mlx5e_select_htb_queues.
+ */
+ smp_store_release(&priv->htb.maj_id, htb_maj_id);
+
+ return 0;
+
+err_sw_node_delete:
+ mlx5e_sw_node_delete(priv, root);
+
+err_free_queues:
+ if (opened)
+ mlx5e_qos_close_all_queues(&priv->channels);
+ return err;
+}
+
+int mlx5e_htb_root_del(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qos_node *root;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_DESTROY\n");
+
+ WRITE_ONCE(priv->htb.maj_id, 0);
+ synchronize_rcu(); /* Sync with mlx5e_select_htb_queue and TX data path. */
+
+ root = mlx5e_sw_node_find(priv, MLX5E_HTB_CLASSID_ROOT);
+ if (!root) {
+ qos_err(priv->mdev, "Failed to find the root node in the QoS tree\n");
+ return -ENOENT;
+ }
+ err = mlx5_qos_destroy_node(priv->mdev, root->hw_id);
+ if (err)
+ qos_err(priv->mdev, "Failed to destroy root node %u, err = %d\n",
+ root->hw_id, err);
+ mlx5e_sw_node_delete(priv, root);
+
+ mlx5e_qos_deactivate_all_queues(&priv->channels);
+ mlx5e_qos_close_all_queues(&priv->channels);
+
+ return err;
+}
+
+static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
+ struct mlx5e_qos_node *parent, u32 *bw_share)
+{
+ u64 share = 0;
+
+ while (parent->classid != MLX5E_HTB_CLASSID_ROOT && !parent->max_average_bw)
+ parent = parent->parent;
+
+ if (parent->max_average_bw)
+ share = div64_u64(div_u64(rate * 100, BYTES_IN_MBIT),
+ parent->max_average_bw);
+ else
+ share = 101;
+
+ *bw_share = share == 0 ? 1 : share > 100 ? 0 : share;
+
+ qos_dbg(priv->mdev, "Convert: rate %llu, parent ceil %llu -> bw_share %u\n",
+ rate, (u64)parent->max_average_bw * BYTES_IN_MBIT, *bw_share);
+
+ return 0;
+}
+
+static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
+{
+ *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+
+ qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
+ ceil, *max_average_bw);
+}
+
+int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ int qid;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_ALLOC_QUEUE classid %04x, parent %04x, rate %llu, ceil %llu\n",
+ classid, parent_classid, rate, ceil);
+
+ qid = mlx5e_find_unused_qos_qid(priv);
+ if (qid < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum amount of leaf classes is reached.");
+ return qid;
+ }
+
+ parent = mlx5e_sw_node_find(priv, parent_classid);
+ if (!parent)
+ return -EINVAL;
+
+ node = mlx5e_sw_node_create_leaf(priv, classid, qid, parent);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node->parent, &node->bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &node->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ mlx5e_sw_node_delete(priv, node);
+ return err;
+ }
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ return mlx5e_qid_from_qos(&priv->channels, node->qid);
+}
+
+int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *child;
+ int err, tmp_err;
+ u32 new_hw_id;
+ u16 qid;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_TO_INNER classid %04x, upcoming child %04x, rate %llu, ceil %llu\n",
+ classid, child_classid, rate, ceil);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_inner_node(priv->mdev, node->parent->hw_id,
+ node->bw_share, node->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating an inner node.");
+ qos_err(priv->mdev, "Failed to create an inner node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ /* Intentionally reuse the qid for the upcoming first child. */
+ child = mlx5e_sw_node_create_leaf(priv, child_classid, node->qid, node);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto err_destroy_hw_node;
+ }
+
+ child->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node, &child->bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &child->max_average_bw);
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, new_hw_id, child->bw_share,
+ child->max_average_bw, &child->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ goto err_delete_sw_node;
+ }
+
+ /* No fail point. */
+
+ qid = node->qid;
+ /* Pairs with mlx5e_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, child);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, child);
+ }
+ }
+
+ return 0;
+
+err_delete_sw_node:
+ child->qid = MLX5E_QOS_QID_INNER;
+ mlx5e_sw_node_delete(priv, child);
+
+err_destroy_hw_node:
+ tmp_err = mlx5_qos_destroy_node(priv->mdev, new_hw_id);
+ if (tmp_err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to roll back creation of an inner node %u (class %04x), err = %d\n",
+ new_hw_id, classid, tmp_err);
+ return err;
+}
+
+static struct mlx5e_qos_node *mlx5e_sw_node_find_by_qid(struct mlx5e_priv *priv, u16 qid)
+{
+ struct mlx5e_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, node, hnode)
+ if (node->qid == qid)
+ break;
+
+ return node;
+}
+
+static void mlx5e_reactivate_qos_sq(struct mlx5e_priv *priv, u16 qid, struct netdev_queue *txq)
+{
+ qos_dbg(priv->mdev, "Reactivate QoS SQ qid %u\n", qid);
+ netdev_tx_reset_queue(txq);
+ netif_tx_start_queue(txq);
+}
+
+static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
+{
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+ if (!qdisc)
+ return;
+
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+}
+
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
+ u16 *new_qid, struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node;
+ struct netdev_queue *txq;
+ u16 qid, moved_qid;
+ bool opened;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);
+
+ *old_qid = *new_qid = 0;
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ /* Store qid for reuse. */
+ qid = node->qid;
+
+ opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (opened) {
+ txq = netdev_get_tx_queue(priv->netdev,
+ mlx5e_qid_from_qos(&priv->channels, qid));
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ mlx5e_sw_node_delete(priv, node);
+
+ moved_qid = mlx5e_qos_cur_leaf_nodes(priv);
+
+ if (moved_qid == 0) {
+ /* The last QoS SQ was just destroyed. */
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+ moved_qid--;
+
+ if (moved_qid < qid) {
+ /* The highest QoS SQ was just destroyed. */
+ WARN(moved_qid != qid - 1, "Gaps in queue numbering: destroyed queue %u, the highest queue is %u",
+ qid, moved_qid);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, qid, txq);
+ return 0;
+ }
+
+ WARN(moved_qid == qid, "Can't move node with qid %u to itself", qid);
+ qos_dbg(priv->mdev, "Moving QoS SQ %u to %u\n", moved_qid, qid);
+
+ node = mlx5e_sw_node_find_by_qid(priv, moved_qid);
+ WARN(!node, "Could not find a node with qid %u to move to queue %u",
+ moved_qid, qid);
+
+ /* Stop traffic to the old queue. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+ __clear_bit(moved_qid, priv->htb.qos_used_qids);
+
+ if (opened) {
+ txq = netdev_get_tx_queue(priv->netdev,
+ mlx5e_qid_from_qos(&priv->channels, moved_qid));
+ mlx5e_deactivate_qos_sq(priv, moved_qid);
+ mlx5e_close_qos_sq(priv, moved_qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(priv->netdev, moved_qid);
+
+ __set_bit(qid, priv->htb.qos_used_qids);
+ WRITE_ONCE(node->qid, qid);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x) while moving qid %u to %u, err = %d\n",
+ node->classid, moved_qid, qid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ mlx5e_update_tx_netdev_queues(priv);
+ if (opened)
+ mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
+
+ *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
+ *new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
+ return 0;
+}
+
+int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *node, *parent;
+ u32 old_hw_id, new_hw_id;
+ int err, saved_err = 0;
+ u16 qid;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL_LAST%s classid %04x\n",
+ force ? "_FORCE" : "", classid);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ err = mlx5_qos_create_leaf_node(priv->mdev, node->parent->parent->hw_id,
+ node->parent->bw_share,
+ node->parent->max_average_bw,
+ &new_hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when creating a leaf node.");
+ qos_err(priv->mdev, "Failed to create a leaf node (class %04x), err = %d\n",
+ classid, err);
+ if (!force)
+ return err;
+ saved_err = err;
+ }
+
+ /* Store qid for reuse and prevent clearing the bit. */
+ qid = node->qid;
+ /* Pairs with mlx5e_get_txq_by_classid. */
+ WRITE_ONCE(node->qid, MLX5E_QOS_QID_INNER);
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_deactivate_qos_sq(priv, qid);
+ mlx5e_close_qos_sq(priv, qid);
+ }
+
+ /* Prevent packets from the old class from getting into the new one. */
+ mlx5e_reset_qdisc(priv->netdev, qid);
+
+ err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ parent = node->parent;
+ mlx5e_sw_node_delete(priv, node);
+
+ node = parent;
+ WRITE_ONCE(node->qid, qid);
+
+ /* Early return on error in force mode. Parent will still be an inner
+ * node to be deleted by a following delete operation.
+ */
+ if (saved_err)
+ return saved_err;
+
+ old_hw_id = node->hw_id;
+ node->hw_id = new_hw_id;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ err = mlx5e_open_qos_sq(priv, &priv->channels, node);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Error creating an SQ.");
+ qos_warn(priv->mdev, "Failed to create a QoS SQ (class %04x), err = %d\n",
+ classid, err);
+ } else {
+ mlx5e_activate_qos_sq(priv, node);
+ }
+ }
+
+ err = mlx5_qos_destroy_node(priv->mdev, old_hw_id);
+ if (err) /* Not fatal. */
+ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
+ node->hw_id, classid, err);
+
+ return 0;
+}
+
+static int mlx5e_qos_update_children(struct mlx5e_priv *priv, struct mlx5e_qos_node *node,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_qos_node *child;
+ int err = 0;
+ int bkt;
+
+ hash_for_each(priv->htb.qos_tc2node, bkt, child, hnode) {
+ u32 old_bw_share = child->bw_share;
+ int err_one;
+
+ if (child->parent != node)
+ continue;
+
+ mlx5e_htb_convert_rate(priv, child->rate, node, &child->bw_share);
+ if (child->bw_share == old_bw_share)
+ continue;
+
+ err_one = mlx5_qos_update_node(priv->mdev, child->hw_id, child->bw_share,
+ child->max_average_bw, child->hw_id);
+ if (!err && err_one) {
+ err = err_one;
+
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a child node.");
+ qos_err(priv->mdev, "Failed to modify a child node (class %04x), err = %d\n",
+ node->classid, err);
+ }
+ }
+
+ return err;
+}
+
+int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack)
+{
+ u32 bw_share, max_average_bw;
+ struct mlx5e_qos_node *node;
+ bool ceil_changed = false;
+ int err;
+
+ qos_dbg(priv->mdev, "TC_HTB_LEAF_MODIFY classid %04x, rate %llu, ceil %llu\n",
+ classid, rate, ceil);
+
+ node = mlx5e_sw_node_find(priv, classid);
+ if (!node)
+ return -ENOENT;
+
+ node->rate = rate;
+ mlx5e_htb_convert_rate(priv, rate, node->parent, &bw_share);
+ mlx5e_htb_convert_ceil(priv, ceil, &max_average_bw);
+
+ err = mlx5_qos_update_node(priv->mdev, node->parent->hw_id, bw_share,
+ max_average_bw, node->hw_id);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
+ qos_err(priv->mdev, "Failed to modify a node (class %04x), err = %d\n",
+ classid, err);
+ return err;
+ }
+
+ if (max_average_bw != node->max_average_bw)
+ ceil_changed = true;
+
+ node->bw_share = bw_share;
+ node->max_average_bw = max_average_bw;
+
+ if (ceil_changed)
+ err = mlx5e_qos_update_children(priv, node, extack);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
new file mode 100644
index 000000000000..5af7991fcd19
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5E_EN_QOS_H
+#define __MLX5E_EN_QOS_H
+
+#include <linux/mlx5/driver.h>
+
+#define MLX5E_QOS_MAX_LEAF_NODES 256
+
+struct mlx5e_priv;
+struct mlx5e_channels;
+struct mlx5e_channel;
+
+int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
+int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
+
+/* TX datapath API */
+int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid);
+struct mlx5e_txqsq *mlx5e_get_sq(struct mlx5e_priv *priv, int qid);
+
+/* SQ lifecycle */
+int mlx5e_qos_open_queues(struct mlx5e_priv *priv, struct mlx5e_channels *chs);
+void mlx5e_qos_activate_queues(struct mlx5e_priv *priv);
+void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c);
+void mlx5e_qos_close_queues(struct mlx5e_channel *c);
+
+/* HTB API */
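+/* Entry points backing the TC_SETUP_QDISC_HTB offload commands; e.g.
+ * mlx5e_htb_leaf_del_last() handles TC_HTB_LEAF_DEL_LAST{,_FORCE} and
+ * mlx5e_htb_node_modify() handles TC_HTB_LEAF_MODIFY (see the qos_dbg
+ * messages in en/qos.c).
+ */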
+int mlx5e_htb_root_add(struct mlx5e_priv *priv, u16 htb_maj_id, u16 htb_defcls,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_root_del(struct mlx5e_priv *priv);
+int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
+ u32 parent_classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
+ u64 rate, u64 ceil, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
+ u16 *new_qid, struct netlink_ext_ack *extack);
+int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
+ struct netlink_ext_ack *extack);
+int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
+ struct netlink_ext_ack *extack);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
index 58e27038c947..be0ee03de721 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -129,10 +129,10 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
work);
struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
struct neighbour *n = update_work->n;
+ bool neigh_connected, same_dev;
struct mlx5e_encap_entry *e;
unsigned char ha[ETH_ALEN];
struct mlx5e_priv *priv;
- bool neigh_connected;
u8 nud_state, dead;
rtnl_lock();
@@ -146,12 +146,16 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
memcpy(ha, n->ha, ETH_ALEN);
nud_state = n->nud_state;
dead = n->dead;
+ same_dev = READ_ONCE(nhe->neigh_dev) == n->dev;
read_unlock_bh(&n->lock);
neigh_connected = (nud_state & NUD_VALID) && !dead;
trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
+ if (!same_dev)
+ goto out;
+
list_for_each_entry(e, &nhe->encap_list, encap_list) {
if (!mlx5e_encap_take(e))
continue;
@@ -160,6 +164,7 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
mlx5e_encap_put(priv, e);
}
+out:
rtnl_unlock();
mlx5e_release_neigh_update_work(update_work);
}
@@ -175,7 +180,6 @@ static struct neigh_update_work *mlx5e_alloc_neigh_update_work(struct mlx5e_priv
if (WARN_ON(!update_work))
return NULL;
- m_neigh.dev = n->dev;
m_neigh.family = n->ops->family;
memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
@@ -246,7 +250,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
rcu_read_lock();
list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
neigh_list) {
- if (p->dev == nhe->m_neigh.dev) {
+ if (p->dev == READ_ONCE(nhe->neigh_dev)) {
found = true;
break;
}
@@ -279,7 +283,7 @@ int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
if (err)
- return err;
+ goto out_err;
INIT_LIST_HEAD(&neigh_update->neigh_list);
mutex_init(&neigh_update->encap_lock);
@@ -287,14 +291,19 @@ int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
mlx5e_rep_neigh_stats_work);
mlx5e_rep_neigh_update_init_interval(rpriv);
- rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
- err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
+ neigh_update->netevent_nb.notifier_call = mlx5e_rep_netevent_event;
+ err = register_netevent_notifier(&neigh_update->netevent_nb);
if (err)
- goto out_err;
+ goto out_notifier;
return 0;
-out_err:
+out_notifier:
+ neigh_update->netevent_nb.notifier_call = NULL;
rhashtable_destroy(&neigh_update->neigh_ht);
+out_err:
+ netdev_warn(rpriv->netdev,
+ "Failed to initialize neighbours handling for vport %d\n",
+ rpriv->rep->vport);
return err;
}
@@ -303,6 +312,9 @@ void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ if (!rpriv->neigh_update.netevent_nb.notifier_call)
+ return;
+
unregister_netevent_notifier(&neigh_update->netevent_nb);
flush_workqueue(priv->wq); /* flush neigh update works */
@@ -361,7 +373,8 @@ mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
}
int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev,
struct mlx5e_neigh_hash_entry **nhe)
{
int err;
@@ -371,10 +384,11 @@ int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
return -ENOMEM;
(*nhe)->priv = priv;
- memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
+ memcpy(&(*nhe)->m_neigh, m_neigh, sizeof(*m_neigh));
spin_lock_init(&(*nhe)->encap_list_lock);
INIT_LIST_HEAD(&(*nhe)->encap_list);
refcount_set(&(*nhe)->refcnt, 1);
+ WRITE_ONCE((*nhe)->neigh_dev, neigh_dev);
err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
index 32b239189c95..6fe0ab970943 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.h
@@ -16,7 +16,8 @@ struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
struct mlx5e_neigh *m_neigh);
int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev,
struct mlx5e_neigh_hash_entry **nhe);
void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 76177f7c5ec2..065126370acd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -26,7 +26,9 @@ struct mlx5e_rep_indr_block_priv {
};
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e)
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
@@ -39,9 +41,9 @@ int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
return err;
mutex_lock(&rpriv->neigh_update.encap_lock);
- nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
+ nhe = mlx5e_rep_neigh_entry_lookup(priv, m_neigh);
if (!nhe) {
- err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
+ err = mlx5e_rep_neigh_entry_create(priv, m_neigh, neigh_dev, &nhe);
if (err) {
mutex_unlock(&rpriv->neigh_update.encap_lock);
mlx5_tun_entropy_refcount_dec(tun_entropy,
@@ -122,7 +124,7 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
}
unlock:
mutex_unlock(&esw->offloads.encap_tbl_lock);
- mlx5e_put_encap_flow_list(priv, &flow_list);
+ mlx5e_put_flow_list(priv, &flow_list);
}
static int
@@ -651,7 +653,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
- zone_restore_id = reg_c1 & ZONE_RESTORE_MAX;
+ zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
@@ -660,7 +662,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
return false;
}
- tunnel_id = reg_c1 >> REG_MAPPING_SHIFT(TUNNEL_TO_REG);
+ tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
index fdf9702c2d7d..d0661578467b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -27,7 +27,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
unsigned char ha[ETH_ALEN]);
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e);
+ struct mlx5e_encap_entry *e,
+ struct mlx5e_neigh *m_neigh,
+ struct net_device *neigh_dev);
void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 6bc6b48a56dc..f3f6eb081948 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -12,6 +12,7 @@
#include <net/flow_offload.h>
#include <net/netfilter/nf_flow_table.h>
#include <linux/workqueue.h>
+#include <linux/refcount.h>
#include <linux/xarray.h>
#include "lib/fs_chains.h"
@@ -27,6 +28,7 @@
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
#define MLX5_CT_STATE_NAT_BIT BIT(3)
+#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
@@ -51,11 +53,11 @@ struct mlx5_tc_ct_priv {
struct mlx5_flow_table *ct_nat;
struct mlx5_flow_table *post_ct;
struct mutex control_lock; /* guards parallel adds/dels */
- struct mutex shared_counter_lock;
struct mapping_ctx *zone_mapping;
struct mapping_ctx *labels_mapping;
enum mlx5_flow_namespace_type ns_type;
struct mlx5_fs_chains *chains;
+ spinlock_t ht_lock; /* protects ft entries */
};
struct mlx5_ct_flow {
@@ -124,6 +126,10 @@ struct mlx5_ct_counter {
bool is_shared;
};
+enum {
+ MLX5_CT_ENTRY_FLAG_VALID,
+};
+
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
@@ -134,6 +140,12 @@ struct mlx5_ct_entry {
struct mlx5_ct_tuple tuple;
struct mlx5_ct_tuple tuple_nat;
struct mlx5_ct_zone_rule zone_rules[2];
+
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct work_struct work;
+
+ refcount_t refcnt;
+ unsigned long flags;
};
static const struct rhashtable_params cts_ht_params = {
@@ -641,6 +653,7 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
}
ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
+ ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
ct_state,
meta->ct_metadata.mark,
@@ -709,11 +722,11 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->outer_match_level = MLX5_MATCH_L4;
attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
+ if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ attr->esw_attr->in_mdev = priv->mdev;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
- mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
- entry->tuple.zone & MLX5_CT_ZONE_MASK,
- MLX5_CT_ZONE_MASK);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(zone_rule->rule)) {
@@ -740,6 +753,87 @@ err_attr:
return err;
}
+static bool
+mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
+{
+ return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
+}
+
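+/* Look up @tuple in the plain and then the NAT tuple tables, taking a
+ * reference only on entries that completed offload (VALID flag set). An
+ * entry found mid-teardown is reported as ERR_PTR(-EINVAL). Callers hold
+ * ct_priv->ht_lock.
+ */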
+static struct mlx5_ct_entry *
+mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple)
+{
+ struct mlx5_ct_entry *entry;
+
+ entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple,
+ tuples_ht_params);
+ if (entry && mlx5_tc_ct_entry_valid(entry) &&
+ refcount_inc_not_zero(&entry->refcnt)) {
+ return entry;
+ } else if (!entry) {
+ entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
+ tuple, tuples_nat_ht_params);
+ if (entry && mlx5_tc_ct_entry_valid(entry) &&
+ refcount_inc_not_zero(&entry->refcnt))
+ return entry;
+ }
+
+ return entry ? ERR_PTR(-EINVAL) : NULL;
+}
+
+static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
+{
+ struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
+
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+ tuples_ht_params);
+}
+
+static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
+{
+ struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
+
+ mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+
+ spin_lock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_tc_ct_counter_put(ct_priv, entry);
+ kfree(entry);
+}
+
+static void
+mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
+{
+ if (!refcount_dec_and_test(&entry->refcnt))
+ return;
+
+ mlx5_tc_ct_entry_del(entry);
+}
+
+static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
+{
+ struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);
+
+ mlx5_tc_ct_entry_del(entry);
+}
+
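+/* Reference drop for contexts that must not tear down HW rules inline
+ * (e.g. the skb restore path); the final put defers deletion to the
+ * driver workqueue via mlx5_tc_ct_entry_del_work().
+ */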
+static void
+__mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
+{
+ struct mlx5e_priv *priv;
+
+ if (!refcount_dec_and_test(&entry->refcnt))
+ return;
+
+ priv = netdev_priv(entry->ct_priv->netdev);
+ INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
+ queue_work(priv->wq, &entry->work);
+}
+
static struct mlx5_ct_counter *
mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
{
@@ -770,7 +864,6 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
- int ret;
/* get the reversed tuple */
tmp_port = rev_tuple.port.src;
@@ -792,23 +885,31 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
}
/* Use the same counter as the reverse direction */
- mutex_lock(&ct_priv->shared_counter_lock);
- rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
- tuples_ht_params);
- if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
- mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->counter;
- }
+ spin_lock_bh(&ct_priv->ht_lock);
+ rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
+
+ if (IS_ERR(rev_entry)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ goto create_counter;
}
- mutex_unlock(&ct_priv->shared_counter_lock);
- shared_counter = mlx5_tc_ct_counter_create(ct_priv);
- if (IS_ERR(shared_counter)) {
- ret = PTR_ERR(shared_counter);
- return ERR_PTR(ret);
+ if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+ ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+ shared_counter = rev_entry->counter;
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_tc_ct_entry_put(rev_entry);
+ return shared_counter;
}
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+create_counter:
+
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter))
+ return shared_counter;
+
shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
@@ -866,10 +967,14 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (!meta_action)
return -EOPNOTSUPP;
- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
- cts_ht_params);
- if (entry)
- return 0;
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (entry && refcount_inc_not_zero(&entry->refcnt)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ return -EEXIST;
+ }
+ spin_unlock_bh(&ct_priv->ht_lock);
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -878,6 +983,8 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
entry->tuple.zone = ft->zone;
entry->cookie = flow->cookie;
entry->restore_cookie = meta_action->ct_metadata.cookie;
+ refcount_set(&entry->refcnt, 2);
+ entry->ct_priv = ct_priv;
err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
if (err)
@@ -888,35 +995,40 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_set;
- err = rhashtable_insert_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
+ spin_lock_bh(&ct_priv->ht_lock);
+
+ err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
+ cts_ht_params);
+ if (err)
+ goto err_entries;
+
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
if (err)
goto err_tuple;
if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
- err = rhashtable_insert_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
if (err)
goto err_tuple_nat;
}
+ spin_unlock_bh(&ct_priv->ht_lock);
err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
ft->zone_restore_id);
if (err)
goto err_rules;
- err = rhashtable_insert_fast(&ft->ct_entries_ht, &entry->node,
- cts_ht_params);
- if (err)
- goto err_insert;
+ set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
+ mlx5_tc_ct_entry_put(entry); /* this function reference */
return 0;
-err_insert:
- mlx5_tc_ct_entry_del_rules(ct_priv, entry);
err_rules:
+ spin_lock_bh(&ct_priv->ht_lock);
if (mlx5_tc_ct_entry_has_nat(entry))
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node, tuples_nat_ht_params);
@@ -925,47 +1037,43 @@ err_tuple_nat:
&entry->tuple_node,
tuples_ht_params);
err_tuple:
+ rhashtable_remove_fast(&ft->ct_entries_ht,
+ &entry->node,
+ cts_ht_params);
+err_entries:
+ spin_unlock_bh(&ct_priv->ht_lock);
err_set:
kfree(entry);
- netdev_warn(ct_priv->netdev,
- "Failed to offload ct entry, err: %d\n", err);
+ if (err != -EEXIST)
+ netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err);
return err;
}
-static void
-mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_ct_entry *entry)
-{
- mlx5_tc_ct_entry_del_rules(ct_priv, entry);
- mutex_lock(&ct_priv->shared_counter_lock);
- if (mlx5_tc_ct_entry_has_nat(entry))
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
- tuples_ht_params);
- mutex_unlock(&ct_priv->shared_counter_lock);
- mlx5_tc_ct_counter_put(ct_priv, entry);
-
-}
-
static int
mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
struct flow_cls_offload *flow)
{
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
unsigned long cookie = flow->cookie;
struct mlx5_ct_entry *entry;
- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
- cts_ht_params);
- if (!entry)
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (!entry) {
+ spin_unlock_bh(&ct_priv->ht_lock);
return -ENOENT;
+ }
- mlx5_tc_ct_del_ft_entry(ft->ct_priv, entry);
- WARN_ON(rhashtable_remove_fast(&ft->ct_entries_ht,
- &entry->node,
- cts_ht_params));
- kfree(entry);
+ if (!mlx5_tc_ct_entry_valid(entry)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ return -EINVAL;
+ }
+
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+ spin_unlock_bh(&ct_priv->ht_lock);
+
+ mlx5_tc_ct_entry_put(entry);
return 0;
}
@@ -974,19 +1082,30 @@ static int
mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
struct flow_cls_offload *f)
{
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
unsigned long cookie = f->cookie;
struct mlx5_ct_entry *entry;
u64 lastuse, packets, bytes;
- entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie,
- cts_ht_params);
- if (!entry)
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (!entry) {
+ spin_unlock_bh(&ct_priv->ht_lock);
return -ENOENT;
+ }
+
+ if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ return -EINVAL;
+ }
+
+ spin_unlock_bh(&ct_priv->ht_lock);
mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
+ mlx5_tc_ct_entry_put(entry);
return 0;
}
@@ -1088,8 +1207,8 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ bool trk, est, untrk, unest, new, rpl, unrpl;
struct flow_dissector_key_ct *mask, *key;
- bool trk, est, untrk, unest, new;
u32 ctstate = 0, ctstate_mask = 0;
u16 ct_state_on, ct_state_off;
u16 ct_state, ct_state_mask;
@@ -1115,9 +1234,10 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
- TCA_FLOWER_KEY_CT_FLAGS_NEW)) {
+ TCA_FLOWER_KEY_CT_FLAGS_NEW |
+ TCA_FLOWER_KEY_CT_FLAGS_REPLY)) {
NL_SET_ERR_MSG_MOD(extack,
- "only ct_state trk, est and new are supported for offload");
+ "only ct_state trk, est, new and rpl are supported for offload");
return -EOPNOTSUPP;
}
@@ -1126,13 +1246,17 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
+ unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
+ ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
if (new) {
NL_SET_ERR_MSG_MOD(extack,
@@ -1247,9 +1371,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
pre_ct->flow_rule = rule;
/* add miss rule */
- memset(spec, 0, sizeof(*spec));
dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
@@ -1478,11 +1601,9 @@ err_mapping:
static void
mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
{
- struct mlx5_tc_ct_priv *ct_priv = arg;
struct mlx5_ct_entry *entry = ptr;
- mlx5_tc_ct_del_ft_entry(ct_priv, entry);
- kfree(entry);
+ mlx5_tc_ct_entry_put(entry);
}
static void
@@ -1760,7 +1881,6 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
goto err_set_registers;
}
- dealloc_mod_hdr_actions(mod_acts);
pre_ct_attr->modify_hdr = mod_hdr;
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
@@ -1960,6 +2080,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
goto err_mapping_labels;
}
+ spin_lock_init(&ct_priv->ht_lock);
ct_priv->ns_type = ns_type;
ct_priv->chains = chains;
ct_priv->netdev = priv->netdev;
@@ -1994,7 +2115,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
idr_init(&ct_priv->fte_ids);
mutex_init(&ct_priv->control_lock);
- mutex_init(&ct_priv->shared_counter_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
@@ -2037,7 +2157,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
- mutex_destroy(&ct_priv->shared_counter_lock);
idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
}
@@ -2059,14 +2178,22 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
return false;
- entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &tuple,
- tuples_ht_params);
- if (!entry)
- entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
- &tuple, tuples_nat_ht_params);
- if (!entry)
+ spin_lock(&ct_priv->ht_lock);
+
+ entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
+ if (!entry) {
+ spin_unlock(&ct_priv->ht_lock);
+ return false;
+ }
+
+ if (IS_ERR(entry)) {
+ spin_unlock(&ct_priv->ht_lock);
return false;
+ }
+ spin_unlock(&ct_priv->ht_lock);
tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
+ __mlx5_tc_ct_entry_put(entry);
+
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 6503b614337c..69e618d17071 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -73,7 +73,7 @@ struct mlx5_ct_attr {
#define zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
.moffset = 0,\
- .mlen = 1,\
+ .mlen = (ESW_ZONE_ID_BITS / 8),\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_1) + 3,\
}
@@ -81,14 +81,12 @@ struct mlx5_ct_attr {
#define nic_zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
.moffset = 2,\
- .mlen = 1,\
+ .mlen = (ESW_ZONE_ID_BITS / 8),\
}
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
#define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)
-#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
-#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
new file mode 100644
index 000000000000..c223591ffc22
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_PRIV_H__
+#define __MLX5_EN_TC_PRIV_H__
+
+#include "en_tc.h"
+
+#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
+
+#define MLX5E_TC_MAX_SPLITS 1
+
+enum {
+ MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
+ MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
+ MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
+ MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
+ MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
+ MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
+ MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
+ MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
+ MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
+ MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
+ MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
+ MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
+};
+
+struct mlx5e_tc_flow_parse_attr {
+ const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct net_device *filter_dev;
+ struct mlx5_flow_spec spec;
+ struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
+ int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct ethhdr eth;
+};
+
+/* Helper struct for accessing a struct containing list_head array.
+ * Containing struct
+ * |- Helper array
+ * [0] Helper item 0
+ * |- list_head item 0
+ * |- index (0)
+ * [1] Helper item 1
+ * |- list_head item 1
+ * |- index (1)
+ * To access the containing struct from one of the list_head items:
+ * 1. Get the helper item from the list_head item using
+ * helper item =
+ * container_of(list_head item, helper struct type, list_head field)
+ * 2. Get the containing struct from the helper item and its index in the array:
+ * containing struct =
+ * container_of(helper item, containing struct type, helper field[index])
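+ * For example (illustrative only), given "struct encap_flow_item *efi"
+ * embedded in the encaps[] array of a flow:
+ * flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);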
+ */
+struct encap_flow_item {
+ struct mlx5e_encap_entry *e; /* attached encap instance */
+ struct list_head list;
+ int index;
+};
+
+struct encap_route_flow_item {
+ struct mlx5e_route_entry *r; /* attached route instance */
+ int index;
+};
+
+struct mlx5e_tc_flow {
+ struct rhash_head node;
+ struct mlx5e_priv *priv;
+ u64 cookie;
+ unsigned long flags;
+ struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
+
+ /* flows sharing the same reformat object - currently mpls decap */
+ struct list_head l3_to_l2_reformat;
+ struct mlx5e_decap_entry *decap_reformat;
+
+ /* flows sharing same route entry */
+ struct list_head decap_routes;
+ struct mlx5e_route_entry *decap_route;
+ struct encap_route_flow_item encap_routes[MLX5_MAX_FLOW_FWD_VPORTS];
+
+ /* Flow can be associated with multiple encap IDs.
+ * The number of encaps is bounded by the number of supported
+ * destinations.
+ */
+ struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5e_tc_flow *peer_flow;
+ struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
+ struct list_head hairpin; /* flows sharing the same hairpin */
+ struct list_head peer; /* flows with peer flow */
+ struct list_head unready; /* flows not ready to be offloaded (e.g.
+ * due to missing route)
+ */
+ struct net_device *orig_dev; /* netdev adding flow first */
+ int tmp_entry_index;
+ struct list_head tmp_list; /* temporary flow list used by neigh update */
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+ struct completion init_done;
+ int tunnel_id; /* the mapped tunnel id of this flow */
+ struct mlx5_flow_attr *attr;
+};
+
+u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer);
+
+struct mlx5_flow_handle *
+mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+
+bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
+
+static inline void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before setting bit. */
+ smp_mb__before_atomic();
+ set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
+
+static inline bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
+ unsigned long flag)
+{
+ /* test_and_set_bit() provides all necessary barriers */
+ return test_and_set_bit(flag, &flow->flags);
+}
+
+#define flow_flag_test_and_set(flow, flag) \
+ __flow_flag_test_and_set(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static inline void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+ clear_bit(flag, &flow->flags);
+}
+
+#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+static inline bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
+{
+ bool ret = test_bit(flag, &flow->flags);
+
+ /* Read fields of flow structure only after checking flags. */
+ smp_mb__after_atomic();
+ return ret;
+}
+
+#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
+ MLX5E_TC_FLOW_FLAG_##flag)
+
+void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow);
+struct mlx5_flow_handle *
+mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec);
+void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow);
+void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);
+
+struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
+
+#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 90930e54b6f2..f8075a604605 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -6,10 +6,32 @@
#include <net/geneve.h>
#include <net/bareudp.h>
#include "en/tc_tun.h"
+#include "en/tc_priv.h"
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"
+struct mlx5e_tc_tun_route_attr {
+ struct net_device *out_dev;
+ struct net_device *route_dev;
+ union {
+ struct flowi4 fl4;
+ struct flowi6 fl6;
+ } fl;
+ struct neighbour *n;
+ u8 ttl;
+};
+
+#define TC_TUN_ROUTE_ATTR_INIT(name) struct mlx5e_tc_tun_route_attr name = {}
+
+static void mlx5e_tc_tun_route_attr_cleanup(struct mlx5e_tc_tun_route_attr *attr)
+{
+ if (attr->n)
+ neigh_release(attr->n);
+ if (attr->route_dev)
+ dev_put(attr->route_dev);
+}
+
struct mlx5e_tc_tunnel *mlx5e_get_tc_tun(struct net_device *tunnel_dev)
{
if (netif_is_vxlan(tunnel_dev))
@@ -79,12 +101,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi4 *fl4,
- struct neighbour **out_n,
- u8 *out_ttl)
+ struct mlx5e_tc_tun_route_attr *attr)
{
+ struct net_device *route_dev;
+ struct net_device *out_dev;
struct neighbour *n;
struct rtable *rt;
@@ -97,46 +117,50 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = mdev->priv.eswitch;
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
- fl4->flowi4_oif = uplink_dev->ifindex;
+ attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
}
- rt = ip_route_output_key(dev_net(mirred_dev), fl4);
+ rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
- ip_rt_put(rt);
- return -ENETUNREACH;
+ ret = -ENETUNREACH;
+ goto err_rt_release;
}
#else
return -EOPNOTSUPP;
#endif
- ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
- if (ret < 0) {
- ip_rt_put(rt);
- return ret;
- }
- dev_hold(*route_dev);
+ ret = get_route_and_out_devs(priv, rt->dst.dev, &route_dev, &out_dev);
+ if (ret < 0)
+ goto err_rt_release;
+ dev_hold(route_dev);
- if (!(*out_ttl))
- *out_ttl = ip4_dst_hoplimit(&rt->dst);
- n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
- ip_rt_put(rt);
+ if (!attr->ttl)
+ attr->ttl = ip4_dst_hoplimit(&rt->dst);
+ n = dst_neigh_lookup(&rt->dst, &attr->fl.fl4.daddr);
if (!n) {
- dev_put(*route_dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_dev_release;
}
- *out_n = n;
+ ip_rt_put(rt);
+ attr->route_dev = route_dev;
+ attr->out_dev = out_dev;
+ attr->n = n;
return 0;
+
+err_dev_release:
+ dev_put(route_dev);
+err_rt_release:
+ ip_rt_put(rt);
+ return ret;
}
-static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
- struct neighbour *n)
+static void mlx5e_route_lookup_ipv4_put(struct mlx5e_tc_tun_route_attr *attr)
{
- neigh_release(n);
- dev_put(route_dev);
+ mlx5e_tc_tun_route_attr_cleanup(attr);
}
static const char *mlx5e_netdev_kind(struct net_device *dev)
@@ -188,28 +212,26 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
- struct net_device *out_dev, *route_dev;
- struct flowi4 fl4 = {};
- struct neighbour *n;
+ struct mlx5e_neigh m_neigh = {};
+ TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
char *encap_header;
- u8 nud_state, ttl;
struct iphdr *ip;
+ u8 nud_state;
int err;
/* add the IP fields */
- fl4.flowi4_tos = tun_key->tos;
- fl4.daddr = tun_key->u.ipv4.dst;
- fl4.saddr = tun_key->u.ipv4.src;
- ttl = tun_key->ttl;
+ attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
+ attr.fl.fl4.saddr = tun_key->u.ipv4.src;
+ attr.ttl = tun_key->ttl;
- err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
- &fl4, &n, &ttl);
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr);
if (err)
return err;
ipv4_encap_size =
- (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct iphdr) +
e->tunnel->calc_hlen(e);
@@ -226,40 +248,36 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
goto release_neigh;
}
- /* used by mlx5e_detach_encap to lookup a neigh hash table
- * entry in the neigh hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
- e->route_dev_ifindex = route_dev->ifindex;
+ m_neigh.family = attr.n->ops->family;
+ memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len);
+ e->out_dev = attr.out_dev;
+ e->route_dev_ifindex = attr.route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
* neigh changes its validity state, we would find the relevant neigh
* in the hash.
*/
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev);
if (err)
goto free_encap;
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ read_unlock_bh(&attr.n->lock);
/* add ethernet header */
- ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
ETH_P_IP);
/* add ip header */
ip->tos = tun_key->tos;
ip->version = 0x4;
ip->ihl = 0x5;
- ip->ttl = ttl;
- ip->daddr = fl4.daddr;
- ip->saddr = fl4.saddr;
+ ip->ttl = attr.ttl;
+ ip->daddr = attr.fl.fl4.daddr;
+ ip->saddr = attr.fl.fl4.saddr;
/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
@@ -271,7 +289,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
+ neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
@@ -287,8 +305,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- mlx5e_route_lookup_ipv4_put(route_dev, n);
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
return err;
destroy_neigh_entry:
@@ -296,55 +314,155 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- mlx5e_route_lookup_ipv4_put(route_dev, n);
+ mlx5e_route_lookup_ipv4_put(&attr);
+ return err;
+}
+
+int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ TC_TUN_ROUTE_ATTR_INIT(attr);
+ int ipv4_encap_size;
+ char *encap_header;
+ struct iphdr *ip;
+ u8 nud_state;
+ int err;
+
+ /* add the IP fields */
+ attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
+ attr.fl.fl4.saddr = tun_key->u.ipv4.src;
+ attr.ttl = tun_key->ttl;
+
+ err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &attr);
+ if (err)
+ return err;
+
+ ipv4_encap_size =
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct iphdr) +
+ e->tunnel->calc_hlen(e);
+
+ if (max_encap_size < ipv4_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv4_encap_size, max_encap_size);
+ err = -EOPNOTSUPP;
+ goto release_neigh;
+ }
+
+ encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
+
+ e->route_dev_ifindex = attr.route_dev->ifindex;
+
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev);
+ read_unlock_bh(&attr.n->lock);
+
+ /* add ethernet header */
+ ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
+ ETH_P_IP);
+
+ /* add ip header */
+ ip->tos = tun_key->tos;
+ ip->version = 0x4;
+ ip->ihl = 0x5;
+ ip->ttl = attr.ttl;
+ ip->daddr = attr.fl.fl4.daddr;
+ ip->saddr = attr.fl.fl4.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr),
+ &ip->protocol, e);
+ if (err)
+ goto free_encap;
+
+ e->encap_size = ipv4_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+ goto release_neigh;
+ }
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv4_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
+ goto free_encap;
+ }
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv4_put(&attr);
+ return err;
+
+free_encap:
+ kfree(encap_header);
+release_neigh:
+ mlx5e_route_lookup_ipv4_put(&attr);
return err;
}
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
- struct net_device **out_dev,
- struct net_device **route_dev,
- struct flowi6 *fl6,
- struct neighbour **out_n,
- u8 *out_ttl)
+ struct mlx5e_tc_tun_route_attr *attr)
{
+ struct net_device *route_dev;
+ struct net_device *out_dev;
struct dst_entry *dst;
struct neighbour *n;
-
int ret;
- dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, fl6,
+ dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
return PTR_ERR(dst);
- if (!(*out_ttl))
- *out_ttl = ip6_dst_hoplimit(dst);
+ if (!attr->ttl)
+ attr->ttl = ip6_dst_hoplimit(dst);
- ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
- if (ret < 0) {
- dst_release(dst);
- return ret;
- }
+ ret = get_route_and_out_devs(priv, dst->dev, &route_dev, &out_dev);
+ if (ret < 0)
+ goto err_dst_release;
- dev_hold(*route_dev);
- n = dst_neigh_lookup(dst, &fl6->daddr);
- dst_release(dst);
+ dev_hold(route_dev);
+ n = dst_neigh_lookup(dst, &attr->fl.fl6.daddr);
if (!n) {
- dev_put(*route_dev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_dev_release;
}
- *out_n = n;
+ dst_release(dst);
+ attr->out_dev = out_dev;
+ attr->route_dev = route_dev;
+ attr->n = n;
return 0;
+
+err_dev_release:
+ dev_put(route_dev);
+err_dst_release:
+ dst_release(dst);
+ return ret;
}
-static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
- struct neighbour *n)
+static void mlx5e_route_lookup_ipv6_put(struct mlx5e_tc_tun_route_attr *attr)
{
- neigh_release(n);
- dev_put(route_dev);
+ mlx5e_tc_tun_route_attr_cleanup(attr);
}
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
@@ -353,28 +471,25 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
- struct net_device *out_dev, *route_dev;
- struct flowi6 fl6 = {};
+ struct mlx5e_neigh m_neigh = {};
+ TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
- struct neighbour *n = NULL;
int ipv6_encap_size;
char *encap_header;
- u8 nud_state, ttl;
+ u8 nud_state;
int err;
- ttl = tun_key->ttl;
+ attr.ttl = tun_key->ttl;
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
+ attr.fl.fl6.saddr = tun_key->u.ipv6.src;
- fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
- fl6.daddr = tun_key->u.ipv6.dst;
- fl6.saddr = tun_key->u.ipv6.src;
-
- err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
- &fl6, &n, &ttl);
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr);
if (err)
return err;
ipv6_encap_size =
- (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
sizeof(struct ipv6hdr) +
e->tunnel->calc_hlen(e);
@@ -391,39 +506,35 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
- /* used by mlx5e_detach_encap to lookup a neigh hash table
- * entry in the neigh hash table when a user deletes a rule
- */
- e->m_neigh.dev = n->dev;
- e->m_neigh.family = n->ops->family;
- memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
- e->out_dev = out_dev;
- e->route_dev_ifindex = route_dev->ifindex;
+ m_neigh.family = attr.n->ops->family;
+ memcpy(&m_neigh.dst_ip, attr.n->primary_key, attr.n->tbl->key_len);
+ e->out_dev = attr.out_dev;
+ e->route_dev_ifindex = attr.route_dev->ifindex;
/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
* neigh changes its validity state, we would find the relevant neigh
* in the hash.
*/
- err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
+ err = mlx5e_rep_encap_entry_attach(netdev_priv(attr.out_dev), e, &m_neigh, attr.n->dev);
if (err)
goto free_encap;
- read_lock_bh(&n->lock);
- nud_state = n->nud_state;
- ether_addr_copy(e->h_dest, n->ha);
- read_unlock_bh(&n->lock);
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ read_unlock_bh(&attr.n->lock);
/* add ethernet header */
- ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e,
+ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
ETH_P_IPV6);
/* add ip header */
ip6_flow_hdr(ip6h, tun_key->tos, 0);
/* the HW fills up ipv6 payload len */
- ip6h->hop_limit = ttl;
- ip6h->daddr = fl6.daddr;
- ip6h->saddr = fl6.saddr;
+ ip6h->hop_limit = attr.ttl;
+ ip6h->daddr = attr.fl.fl6.daddr;
+ ip6h->saddr = attr.fl.fl6.saddr;
/* add tunneling protocol header */
err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
@@ -435,7 +546,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
+ neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
@@ -452,8 +563,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
}
e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
- mlx5e_route_lookup_ipv6_put(route_dev, n);
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
return err;
destroy_neigh_entry:
@@ -461,10 +572,160 @@ destroy_neigh_entry:
free_encap:
kfree(encap_header);
release_neigh:
- mlx5e_route_lookup_ipv6_put(route_dev, n);
+ mlx5e_route_lookup_ipv6_put(&attr);
return err;
}
+
+int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ TC_TUN_ROUTE_ATTR_INIT(attr);
+ struct ipv6hdr *ip6h;
+ int ipv6_encap_size;
+ char *encap_header;
+ u8 nud_state;
+ int err;
+
+ attr.ttl = tun_key->ttl;
+
+ attr.fl.fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ attr.fl.fl6.daddr = tun_key->u.ipv6.dst;
+ attr.fl.fl6.saddr = tun_key->u.ipv6.src;
+
+ err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &attr);
+ if (err)
+ return err;
+
+ ipv6_encap_size =
+ (is_vlan_dev(attr.route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) +
+ sizeof(struct ipv6hdr) +
+ e->tunnel->calc_hlen(e);
+
+ if (max_encap_size < ipv6_encap_size) {
+ mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
+ ipv6_encap_size, max_encap_size);
+ err = -EOPNOTSUPP;
+ goto release_neigh;
+ }
+
+ encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
+ if (!encap_header) {
+ err = -ENOMEM;
+ goto release_neigh;
+ }
+
+ e->route_dev_ifindex = attr.route_dev->ifindex;
+
+ read_lock_bh(&attr.n->lock);
+ nud_state = attr.n->nud_state;
+ ether_addr_copy(e->h_dest, attr.n->ha);
+ WRITE_ONCE(e->nhe->neigh_dev, attr.n->dev);
+ read_unlock_bh(&attr.n->lock);
+
+ /* add ethernet header */
+ ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, attr.route_dev, e,
+ ETH_P_IPV6);
+
+ /* add ip header */
+ ip6_flow_hdr(ip6h, tun_key->tos, 0);
+ /* the HW fills up ipv6 payload len */
+ ip6h->hop_limit = attr.ttl;
+ ip6h->daddr = attr.fl.fl6.daddr;
+ ip6h->saddr = attr.fl.fl6.saddr;
+
+ /* add tunneling protocol header */
+ err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr),
+ &ip6h->nexthdr, e);
+ if (err)
+ goto free_encap;
+
+ e->encap_size = ipv6_encap_size;
+ kfree(e->encap_header);
+ e->encap_header = encap_header;
+
+ if (!(nud_state & NUD_VALID)) {
+ neigh_event_send(attr.n, NULL);
+ /* the encap entry will be made valid on neigh update event
+ * and not used before that.
+ */
+ goto release_neigh;
+ }
+
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ ipv6_encap_size, encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ err = PTR_ERR(e->pkt_reformat);
+ goto free_encap;
+ }
+
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
+ mlx5e_route_lookup_ipv6_put(&attr);
+ return err;
+
+free_encap:
+ kfree(encap_header);
+release_neigh:
+ mlx5e_route_lookup_ipv6_put(&attr);
+ return err;
+}
+#endif
+
+int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *flow_attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
+ TC_TUN_ROUTE_ATTR_INIT(attr);
+ u16 vport_num;
+ int err = 0;
+
+ if (flow_attr->ip_version == 4) {
+ /* Addresses are swapped for decap */
+ attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
+ attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
+ err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
+ }
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ else if (flow_attr->ip_version == 6) {
+ /* Addresses are swapped for decap */
+ attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
+ attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
+ err = mlx5e_route_lookup_ipv6_get(priv, priv->netdev, &attr);
+ }
#endif
+ else
+ return 0;
+
+ if (err)
+ return err;
+
+ if (attr.route_dev->netdev_ops != &mlx5e_netdev_ops ||
+ !mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev))
+ goto out;
+
+ err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num);
+ if (err)
+ goto out;
+
+ esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.vxlan_vni);
+ esw_attr->rx_tun_attr->decap_vport = vport_num;
+
+out:
+ if (flow_attr->ip_version == 4)
+ mlx5e_route_lookup_ipv4_put(&attr);
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ else if (flow_attr->ip_version == 6)
+ mlx5e_route_lookup_ipv6_put(&attr);
+#endif
+ return err;
+}
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev)
@@ -625,14 +886,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
}
}
- /* Enforce DMAC when offloading incoming tunneled flows.
- * Flow counters require a match on the DMAC.
- */
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
- ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
- dmac_47_16), priv->netdev->dev_addr);
-
/* let software handle IP fragments */
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 704359df6095..67de2bf36861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -11,6 +11,8 @@
#include "en.h"
#include "en_rep.h"
+#ifdef CONFIG_MLX5_ESWITCH
+
enum {
MLX5E_TC_TUNNEL_TYPE_UNKNOWN,
MLX5E_TC_TUNNEL_TYPE_VXLAN,
@@ -59,17 +61,30 @@ int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev,
int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e);
+int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e);
#else
static inline int
mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_encap_entry *e) { return -EOPNOTSUPP; }
+static inline int
+mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5e_encap_entry *e)
+{ return -EOPNOTSUPP; }
#endif
+int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
struct net_device *netdev);
@@ -86,4 +101,6 @@ int mlx5e_tc_tun_parse_udp_ports(struct mlx5e_priv *priv,
void *headers_c,
void *headers_v);
+#endif /* CONFIG_MLX5_ESWITCH */
+
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
new file mode 100644
index 000000000000..6a116335bb21
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -0,0 +1,1653 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <net/fib_notifier.h>
+#include "tc_tun_encap.h"
+#include "en_tc.h"
+#include "tc_tun.h"
+#include "rep/tc.h"
+#include "diag/en_tc_tracepoint.h"
+
+enum {
+ MLX5E_ROUTE_ENTRY_VALID = BIT(0),
+};
+
+struct mlx5e_route_key {
+ int ip_version;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } endpoint_ip;
+};
+
+struct mlx5e_route_entry {
+ struct mlx5e_route_key key;
+ struct list_head encap_entries;
+ struct list_head decap_flows;
+ u32 flags;
+ struct hlist_node hlist;
+ refcount_t refcnt;
+ int tunnel_dev_index;
+ struct rcu_head rcu;
+};
+
+struct mlx5e_tc_tun_encap {
+ struct mlx5e_priv *priv;
+ struct notifier_block fib_nb;
+ spinlock_t route_lock; /* protects route_tbl */
+ unsigned long route_tbl_last_update;
+ DECLARE_HASHTABLE(route_tbl, 8);
+};
+
+static bool mlx5e_route_entry_valid(struct mlx5e_route_entry *r)
+{
+ return r->flags & MLX5E_ROUTE_ENTRY_VALID;
+}
+
+int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ struct mlx5_rx_tun_attr *tun_attr;
+ void *daddr, *saddr;
+ u8 ip_version;
+
+ tun_attr = kvzalloc(sizeof(*tun_attr), GFP_KERNEL);
+ if (!tun_attr)
+ return -ENOMEM;
+
+ esw_attr->rx_tun_attr = tun_attr;
+ ip_version = mlx5e_tc_get_ip_version(spec, true);
+
+ if (ip_version == 4) {
+ daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ tun_attr->dst_ip.v4 = *(__be32 *)daddr;
+ tun_attr->src_ip.v4 = *(__be32 *)saddr;
+ if (!tun_attr->dst_ip.v4 || !tun_attr->src_ip.v4)
+ return 0;
+ }
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ else if (ip_version == 6) {
+ int ipv6_size = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
+ struct in6_addr zerov6 = {};
+
+ daddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ saddr = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ memcpy(&tun_attr->dst_ip.v6, daddr, ipv6_size);
+ memcpy(&tun_attr->src_ip.v6, saddr, ipv6_size);
+ if (!memcmp(&tun_attr->dst_ip.v6, &zerov6, sizeof(zerov6)) ||
+ !memcmp(&tun_attr->src_ip.v6, &zerov6, sizeof(zerov6)))
+ return 0;
+ }
+#endif
+ /* Only set the flag if both src and dst ip addresses exist. They are
+ * required to establish routing.
+ */
+ flow_flag_set(flow, TUN_RX);
+ return 0;
+}
+
+static bool mlx5e_tc_flow_all_encaps_valid(struct mlx5_esw_flow_attr *esw_attr)
+{
+ bool all_flow_encaps_valid = true;
+ int i;
+
+ /* Flow can be associated with multiple encap entries.
+ * Before offloading the flow verify that all of them have
+ * a valid neighbour.
+ */
+ for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
+ continue;
+ if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
+ all_flow_encaps_valid = false;
+ break;
+ }
+ }
+
+ return all_flow_encaps_valid;
+}
+
+void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE)
+ return;
+
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ e->reformat_type,
+ e->encap_size, e->encap_header,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(e->pkt_reformat)) {
+ mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
+ PTR_ERR(e->pkt_reformat));
+ return;
+ }
+ e->flags |= MLX5_ENCAP_ENTRY_VALID;
+ mlx5e_rep_queue_neigh_stats_work(priv);
+
+ list_for_each_entry(flow, flow_list, tmp_list) {
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
+
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
+ esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+
+ /* Do not offload flows with unresolved neighbors */
+ if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
+ continue;
+ /* update from slow path rule to encap rule */
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+ err);
+ continue;
+ }
+
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
+ flow->rule[0] = rule;
+ /* was unset when slow path rule removed */
+ flow_flag_set(flow, OFFLOADED);
+ }
+}
+
+void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct list_head *flow_list)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ list_for_each_entry(flow, flow_list, tmp_list) {
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
+
+ /* update from encap rule to slow path rule */
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+ /* mark the flow's encap dest as non-valid */
+ esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
+ err);
+ continue;
+ }
+
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
+ flow->rule[0] = rule;
+ /* was unset when fast path rule removed */
+ flow_flag_set(flow, OFFLOADED);
+ }
+
+ /* we know that the encap is valid */
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+}
+
+static void mlx5e_take_tmp_flow(struct mlx5e_tc_flow *flow,
+ struct list_head *flow_list,
+ int index)
+{
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ return;
+ wait_for_completion(&flow->init_done);
+
+ flow->tmp_entry_index = index;
+ list_add(&flow->tmp_list, flow_list);
+}
+
+/* Takes reference to all flows attached to encap and adds the flows to
+ * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
+ */
+void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
+{
+ struct encap_flow_item *efi;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(efi, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
+ mlx5e_take_tmp_flow(flow, flow_list, efi->index);
+ }
+}
+
+/* Takes reference to all flows attached to route and adds the flows to
+ * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
+ */
+static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
+ struct list_head *flow_list)
+{
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, &r->decap_flows, decap_routes)
+ mlx5e_take_tmp_flow(flow, flow_list, 0);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
+ struct mlx5e_encap_entry *e)
+{
+ struct mlx5e_encap_entry *next = NULL;
+
+retry:
+ rcu_read_lock();
+
+ /* find encap with non-zero reference counter value */
+ for (next = e ?
+ list_next_or_null_rcu(&nhe->encap_list,
+ &e->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list) :
+ list_first_or_null_rcu(&nhe->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list);
+ next;
+ next = list_next_or_null_rcu(&nhe->encap_list,
+ &next->encap_list,
+ struct mlx5e_encap_entry,
+ encap_list))
+ if (mlx5e_encap_take(next))
+ break;
+
+ rcu_read_unlock();
+
+ /* release starting encap */
+ if (e)
+ mlx5e_encap_put(netdev_priv(e->out_dev), e);
+ if (!next)
+ return next;
+
+ /* wait for encap to be fully initialized */
+ wait_for_completion(&next->res_ready);
+ /* continue searching if encap entry is not in valid state after completion */
+ if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
+ e = next;
+ goto retry;
+ }
+
+ return next;
+}
+
+void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+{
+ struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+ struct mlx5e_encap_entry *e = NULL;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5_fc *counter;
+ struct neigh_table *tbl;
+ bool neigh_used = false;
+ struct neighbour *n;
+ u64 lastuse;
+
+ if (m_neigh->family == AF_INET)
+ tbl = &arp_tbl;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (m_neigh->family == AF_INET6)
+ tbl = ipv6_stub->nd_tbl;
+#endif
+ else
+ return;
+
+ /* mlx5e_get_next_valid_encap() releases previous encap before returning
+ * next one.
+ */
+ while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
+ struct mlx5e_priv *priv = netdev_priv(e->out_dev);
+ struct encap_flow_item *efi, *tmp;
+ struct mlx5_eswitch *esw;
+ LIST_HEAD(flow_list);
+
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_for_each_entry_safe(efi, tmp, &e->flows, list) {
+ flow = container_of(efi, struct mlx5e_tc_flow,
+ encaps[efi->index]);
+ if (IS_ERR(mlx5e_flow_get(flow)))
+ continue;
+ list_add(&flow->tmp_list, &flow_list);
+
+ if (mlx5e_is_offloaded_flow(flow)) {
+ counter = mlx5e_tc_get_counter(flow);
+ lastuse = mlx5_fc_query_lastuse(counter);
+ if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
+ neigh_used = true;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_put_flow_list(priv, &flow_list);
+ if (neigh_used) {
+ /* release current encap before breaking the loop */
+ mlx5e_encap_put(priv, e);
+ break;
+ }
+ }
+
+ trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
+
+ if (neigh_used) {
+ nhe->reported_lastuse = jiffies;
+
+ /* find the relevant neigh according to the cached device and
+ * dst ip pair
+ */
+ n = neigh_lookup(tbl, &m_neigh->dst_ip, READ_ONCE(nhe->neigh_dev));
+ if (!n)
+ return;
+
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+ }
+}
+
+static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ WARN_ON(!list_empty(&e->flows));
+
+ if (e->compl_result > 0) {
+ mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
+
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID)
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+ }
+
+ kfree(e->tun_info);
+ kfree(e->encap_header);
+ kfree_rcu(e, rcu);
+}
+
+static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
+ struct mlx5e_decap_entry *d)
+{
+ WARN_ON(!list_empty(&d->flows));
+
+ if (!d->compl_result)
+ mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
+
+ kfree_rcu(d, rcu);
+}
+
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
+ return;
+ list_del(&e->route_list);
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
+}
+
+static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
+ return;
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
+
+static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index);
+
+void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, int out_index)
+{
+ struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (flow->attr->esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ mlx5e_detach_encap_route(priv, flow, out_index);
+
+ /* flow wasn't fully initialized */
+ if (!e)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_del(&flow->encaps[out_index].list);
+ flow->encaps[out_index].e = NULL;
+ if (!refcount_dec_and_test(&e->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
+ }
+ list_del(&e->route_list);
+ hash_del_rcu(&e->encap_hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_encap_dealloc(priv, e);
+}
+
+void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_entry *d = flow->decap_reformat;
+
+ if (!d)
+ return;
+
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ list_del(&flow->l3_to_l2_reformat);
+ flow->decap_reformat = NULL;
+
+ if (!refcount_dec_and_test(&d->refcnt)) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return;
+ }
+ hash_del_rcu(&d->hlist);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ mlx5e_decap_dealloc(priv, d);
+}
+
+struct encap_key {
+ const struct ip_tunnel_key *ip_tun_key;
+ struct mlx5e_tc_tunnel *tc_tunnel;
+};
+
+static int cmp_encap_info(struct encap_key *a,
+ struct encap_key *b)
+{
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
+}
+
+static int cmp_decap_info(struct mlx5e_decap_key *a,
+ struct mlx5e_decap_key *b)
+{
+ return memcmp(&a->key, &b->key, sizeof(b->key));
+}
+
+static int hash_encap_info(struct encap_key *key)
+{
+ return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ key->tc_tunnel->tunnel_type);
+}
+
+static int hash_decap_info(struct mlx5e_decap_key *key)
+{
+ return jhash(&key->key, sizeof(key->key), 0);
+}
+
+bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
+static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
+{
+ return refcount_inc_not_zero(&e->refcnt);
+}
+
+static struct mlx5e_encap_entry *
+mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_entry *e;
+ struct encap_key e_key;
+
+ hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
+ encap_hlist, hash_key) {
+ e_key.ip_tun_key = &e->tun_info->key;
+ e_key.tc_tunnel = e->tunnel;
+ if (!cmp_encap_info(&e_key, key) &&
+ mlx5e_encap_take(e))
+ return e;
+ }
+
+ return NULL;
+}
+
+static struct mlx5e_decap_entry *
+mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
+ uintptr_t hash_key)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_decap_key r_key;
+ struct mlx5e_decap_entry *e;
+
+ hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
+ hlist, hash_key) {
+ r_key = e->key;
+ if (!cmp_decap_info(&r_key, key) &&
+ mlx5e_decap_take(e))
+ return e;
+ }
+ return NULL;
+}
+
+struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info)
+{
+ size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
+
+ return kmemdup(tun_info, tun_size, GFP_KERNEL);
+}
+
+static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index,
+ struct mlx5e_encap_entry *e,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ for (i = 0; i < out_index; i++) {
+ if (flow->encaps[i].e != e)
+ continue;
+ NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
+ netdev_err(priv->netdev, "can't duplicate encap action\n");
+ return true;
+ }
+
+ return false;
+}
+
+static int mlx5e_set_vf_tunnel(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ struct net_device *out_dev,
+ int route_dev_ifindex,
+ int out_index)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct net_device *route_dev;
+ u16 vport_num;
+ int err = 0;
+ u32 data;
+
+ route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex);
+
+ if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops ||
+ !mlx5e_tc_is_vf_tunnel(out_dev, route_dev))
+ goto out;
+
+ err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num);
+ if (err)
+ goto out;
+
+ attr->dest_chain = 0;
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+ data = mlx5_eswitch_get_vport_metadata_for_set(esw_attr->in_mdev->priv.eswitch,
+ vport_num);
+ err = mlx5e_tc_match_to_reg_set_and_get_id(esw->dev, mod_hdr_acts,
+ MLX5_FLOW_NAMESPACE_FDB,
+ VPORT_TO_REG, data);
+ if (err >= 0) {
+ esw_attr->dests[out_index].src_port_rewrite_act_id = err;
+ err = 0;
+ }
+
+out:
+ if (route_dev)
+ dev_put(route_dev);
+ return err;
+}
+
+static int mlx5e_update_vf_tunnel(struct mlx5_eswitch *esw,
+ struct mlx5_esw_flow_attr *attr,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ struct net_device *out_dev,
+ int route_dev_ifindex,
+ int out_index)
+{
+ int act_id = attr->dests[out_index].src_port_rewrite_act_id;
+ struct net_device *route_dev;
+ u16 vport_num;
+ int err = 0;
+ u32 data;
+
+ route_dev = dev_get_by_index(dev_net(out_dev), route_dev_ifindex);
+
+ if (!route_dev || route_dev->netdev_ops != &mlx5e_netdev_ops ||
+ !mlx5e_tc_is_vf_tunnel(out_dev, route_dev)) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = mlx5e_tc_query_route_vport(out_dev, route_dev, &vport_num);
+ if (err)
+ goto out;
+
+ data = mlx5_eswitch_get_vport_metadata_for_set(attr->in_mdev->priv.eswitch,
+ vport_num);
+ mlx5e_tc_match_to_reg_mod_hdr_change(esw->dev, mod_hdr_acts, VPORT_TO_REG, act_id, data);
+
+out:
+ if (route_dev)
+ dev_put(route_dev);
+ return err;
+}
+
+static unsigned int mlx5e_route_tbl_get_last_update(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_tun_encap *encap;
+ unsigned int ret;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ encap = uplink_priv->encap;
+
+ spin_lock_bh(&encap->route_lock);
+ ret = encap->route_tbl_last_update;
+ spin_unlock_bh(&encap->route_lock);
+ return ret;
+}
+
+static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_encap_entry *e,
+ bool new_encap_entry,
+ unsigned long tbl_time_before,
+ int out_index);
+
+int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct net_device *mirred_dev,
+ int out_index,
+ struct netlink_ext_ack *extack,
+ struct net_device **encap_dev,
+ bool *encap_valid)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ const struct ip_tunnel_info *tun_info;
+ unsigned long tbl_time_before = 0;
+ struct encap_key key;
+ struct mlx5e_encap_entry *e;
+ bool entry_created = false;
+ unsigned short family;
+ uintptr_t hash_key;
+ int err = 0;
+
+ parse_attr = attr->parse_attr;
+ tun_info = parse_attr->tun_info[out_index];
+ family = ip_tunnel_info_af(tun_info);
+ key.ip_tun_key = &tun_info->key;
+ key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
+ if (!key.tc_tunnel) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
+ return -EOPNOTSUPP;
+ }
+
+ hash_key = hash_encap_info(&key);
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ e = mlx5e_encap_get(priv, &key, hash_key);
+
+ /* must verify if encap is valid or not */
+ if (e) {
+ /* Check that entry was not already attached to this flow */
+ if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
+ err = -EOPNOTSUPP;
+ goto out_err;
+ }
+
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ wait_for_completion(&e->res_ready);
+
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ if (e->compl_result < 0) {
+ err = -EREMOTEIO;
+ goto out_err;
+ }
+ goto attach_flow;
+ }
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ refcount_set(&e->refcnt, 1);
+ init_completion(&e->res_ready);
+ entry_created = true;
+ INIT_LIST_HEAD(&e->route_list);
+
+ tun_info = mlx5e_dup_tun_info(tun_info);
+ if (!tun_info) {
+ err = -ENOMEM;
+ goto out_err_init;
+ }
+ e->tun_info = tun_info;
+ err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
+ if (err)
+ goto out_err_init;
+
+ INIT_LIST_HEAD(&e->flows);
+ hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+ tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ if (family == AF_INET)
+ err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
+ else if (family == AF_INET6)
+ err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
+
+ /* Protect against concurrent neigh update. */
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ complete_all(&e->res_ready);
+ if (err) {
+ e->compl_result = err;
+ goto out_err;
+ }
+ e->compl_result = 1;
+
+attach_flow:
+ err = mlx5e_attach_encap_route(priv, flow, e, entry_created, tbl_time_before,
+ out_index);
+ if (err)
+ goto out_err;
+
+ flow->encaps[out_index].e = e;
+ list_add(&flow->encaps[out_index].list, &e->flows);
+ flow->encaps[out_index].index = out_index;
+ *encap_dev = e->out_dev;
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
+ attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ *encap_valid = true;
+ } else {
+ *encap_valid = false;
+ }
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ return err;
+
+out_err:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ if (e)
+ mlx5e_encap_put(priv, e);
+ return err;
+
+out_err_init:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ kfree(tun_info);
+ kfree(e);
+ return err;
+}
+
+int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_decap_entry *d;
+ struct mlx5e_decap_key key;
+ uintptr_t hash_key;
+ int err = 0;
+
+ parse_attr = flow->attr->parse_attr;
+ if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "encap header larger than max supported");
+ return -EOPNOTSUPP;
+ }
+
+ key.key = parse_attr->eth;
+ hash_key = hash_decap_info(&key);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ d = mlx5e_decap_get(priv, &key, hash_key);
+ if (d) {
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ wait_for_completion(&d->res_ready);
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ if (d->compl_result) {
+ err = -EREMOTEIO;
+ goto out_free;
+ }
+ goto found;
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ d->key = key;
+ refcount_set(&d->refcnt, 1);
+ init_completion(&d->res_ready);
+ INIT_LIST_HEAD(&d->flows);
+ hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+
+ d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
+ MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
+ sizeof(parse_attr->eth),
+ &parse_attr->eth,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(d->pkt_reformat)) {
+ err = PTR_ERR(d->pkt_reformat);
+ d->compl_result = err;
+ }
+ mutex_lock(&esw->offloads.decap_tbl_lock);
+ complete_all(&d->res_ready);
+ if (err)
+ goto out_free;
+
+found:
+ flow->decap_reformat = d;
+ attr->decap_pkt_reformat = d->pkt_reformat;
+ list_add(&flow->l3_to_l2_reformat, &d->flows);
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return 0;
+
+out_free:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ mlx5e_decap_put(priv, d);
+ return err;
+
+out_err:
+ mutex_unlock(&esw->offloads.decap_tbl_lock);
+ return err;
+}
+
+static int cmp_route_info(struct mlx5e_route_key *a,
+ struct mlx5e_route_key *b)
+{
+ if (a->ip_version == 4 && b->ip_version == 4)
+ return memcmp(&a->endpoint_ip.v4, &b->endpoint_ip.v4,
+ sizeof(a->endpoint_ip.v4));
+ else if (a->ip_version == 6 && b->ip_version == 6)
+ return memcmp(&a->endpoint_ip.v6, &b->endpoint_ip.v6,
+ sizeof(a->endpoint_ip.v6));
+ return 1;
+}
+
+static u32 hash_route_info(struct mlx5e_route_key *key)
+{
+ if (key->ip_version == 4)
+ return jhash(&key->endpoint_ip.v4, sizeof(key->endpoint_ip.v4), 0);
+ return jhash(&key->endpoint_ip.v6, sizeof(key->endpoint_ip.v6), 0);
+}
+
+static void mlx5e_route_dealloc(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r)
+{
+ WARN_ON(!list_empty(&r->decap_flows));
+ WARN_ON(!list_empty(&r->encap_entries));
+
+ kfree_rcu(r, rcu);
+}
+
+static void mlx5e_route_put(struct mlx5e_priv *priv, struct mlx5e_route_entry *r)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (!refcount_dec_and_mutex_lock(&r->refcnt, &esw->offloads.encap_tbl_lock))
+ return;
+
+ hash_del_rcu(&r->hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_route_dealloc(priv, r);
+}
+
+static void mlx5e_route_put_locked(struct mlx5e_priv *priv, struct mlx5e_route_entry *r)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
+ if (!refcount_dec_and_test(&r->refcnt))
+ return;
+ hash_del_rcu(&r->hlist);
+ mlx5e_route_dealloc(priv, r);
+}
+
+static struct mlx5e_route_entry *
+mlx5e_route_get(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key,
+ u32 hash_key)
+{
+ struct mlx5e_route_key r_key;
+ struct mlx5e_route_entry *r;
+
+ hash_for_each_possible(encap->route_tbl, r, hlist, hash_key) {
+ r_key = r->key;
+ if (!cmp_route_info(&r_key, key) &&
+ refcount_inc_not_zero(&r->refcnt))
+ return r;
+ }
+ return NULL;
+}
+
+static struct mlx5e_route_entry *
+mlx5e_route_get_create(struct mlx5e_priv *priv,
+ struct mlx5e_route_key *key,
+ int tunnel_dev_index,
+ unsigned long *route_tbl_change_time)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_tun_encap *encap;
+ struct mlx5e_route_entry *r;
+ u32 hash_key;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ encap = uplink_priv->encap;
+
+ hash_key = hash_route_info(key);
+ spin_lock_bh(&encap->route_lock);
+ r = mlx5e_route_get(encap, key, hash_key);
+ spin_unlock_bh(&encap->route_lock);
+ if (r) {
+ if (!mlx5e_route_entry_valid(r)) {
+ mlx5e_route_put_locked(priv, r);
+ return ERR_PTR(-EINVAL);
+ }
+ return r;
+ }
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return ERR_PTR(-ENOMEM);
+
+ r->key = *key;
+ r->flags |= MLX5E_ROUTE_ENTRY_VALID;
+ r->tunnel_dev_index = tunnel_dev_index;
+ refcount_set(&r->refcnt, 1);
+ INIT_LIST_HEAD(&r->decap_flows);
+ INIT_LIST_HEAD(&r->encap_entries);
+
+ spin_lock_bh(&encap->route_lock);
+ *route_tbl_change_time = encap->route_tbl_last_update;
+ hash_add(encap->route_tbl, &r->hlist, hash_key);
+ spin_unlock_bh(&encap->route_lock);
+
+ return r;
+}
+
+static struct mlx5e_route_entry *
+mlx5e_route_lookup_for_update(struct mlx5e_tc_tun_encap *encap, struct mlx5e_route_key *key)
+{
+ u32 hash_key = hash_route_info(key);
+ struct mlx5e_route_entry *r;
+
+ spin_lock_bh(&encap->route_lock);
+ encap->route_tbl_last_update = jiffies;
+ r = mlx5e_route_get(encap, key, hash_key);
+ spin_unlock_bh(&encap->route_lock);
+
+ return r;
+}
+
+struct mlx5e_tc_fib_event_data {
+ struct work_struct work;
+ unsigned long event;
+ struct mlx5e_route_entry *r;
+ struct net_device *ul_dev;
+};
+
+static void mlx5e_tc_fib_event_work(struct work_struct *work);
+static struct mlx5e_tc_fib_event_data *
+mlx5e_tc_init_fib_work(unsigned long event, struct net_device *ul_dev, gfp_t flags)
+{
+ struct mlx5e_tc_fib_event_data *fib_work;
+
+ fib_work = kzalloc(sizeof(*fib_work), flags);
+ if (WARN_ON(!fib_work))
+ return NULL;
+
+ INIT_WORK(&fib_work->work, mlx5e_tc_fib_event_work);
+ fib_work->event = event;
+ fib_work->ul_dev = ul_dev;
+
+ return fib_work;
+}
+
+static int
+mlx5e_route_enqueue_update(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r,
+ unsigned long event)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct net_device *ul_dev;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ ul_dev = uplink_rpriv->netdev;
+
+ fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_KERNEL);
+ if (!fib_work)
+ return -ENOMEM;
+
+ dev_hold(ul_dev);
+ refcount_inc(&r->refcnt);
+ fib_work->r = r;
+ queue_work(priv->wq, &fib_work->work);
+
+ return 0;
+}
+
+int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ unsigned long tbl_time_before, tbl_time_after;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ int err = 0;
+
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ if (!esw_attr->rx_tun_attr)
+ goto out;
+
+ tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
+ tbl_time_after = tbl_time_before;
+ err = mlx5e_tc_tun_route_lookup(priv, &parse_attr->spec, attr);
+ if (err || !esw_attr->rx_tun_attr->decap_vport)
+ goto out;
+
+ key.ip_version = attr->ip_version;
+ if (key.ip_version == 4)
+ key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4;
+ else
+ key.endpoint_ip.v6 = esw_attr->rx_tun_attr->dst_ip.v6;
+
+ r = mlx5e_route_get_create(priv, &key, parse_attr->filter_dev->ifindex,
+ &tbl_time_after);
+ if (IS_ERR(r)) {
+ err = PTR_ERR(r);
+ goto out;
+ }
+ /* Routing changed concurrently. FIB event handler might have missed new
+ * entry, schedule update.
+ */
+ if (tbl_time_before != tbl_time_after) {
+ err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE);
+ if (err) {
+ mlx5e_route_put_locked(priv, r);
+ goto out;
+ }
+ }
+
+ flow->decap_route = r;
+ list_add(&flow->decap_routes, &r->decap_flows);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return 0;
+
+out:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return err;
+}
+
+static int mlx5e_attach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5e_encap_entry *e,
+ bool new_encap_entry,
+ unsigned long tbl_time_before,
+ int out_index)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ unsigned long tbl_time_after = tbl_time_before;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ const struct ip_tunnel_info *tun_info;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ unsigned short family;
+ int err = 0;
+
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ tun_info = parse_attr->tun_info[out_index];
+ family = ip_tunnel_info_af(tun_info);
+
+ if (family == AF_INET) {
+ key.endpoint_ip.v4 = tun_info->key.u.ipv4.src;
+ key.ip_version = 4;
+ } else if (family == AF_INET6) {
+ key.endpoint_ip.v6 = tun_info->key.u.ipv6.src;
+ key.ip_version = 6;
+ }
+
+ err = mlx5e_set_vf_tunnel(esw, attr, &parse_attr->mod_hdr_acts, e->out_dev,
+ e->route_dev_ifindex, out_index);
+ if (err || !(esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE))
+ return err;
+
+ r = mlx5e_route_get_create(priv, &key, parse_attr->mirred_ifindex[out_index],
+ &tbl_time_after);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+ /* Routing changed concurrently. FIB event handler might have missed new
+ * entry, schedule update.
+ */
+ if (tbl_time_before != tbl_time_after) {
+ err = mlx5e_route_enqueue_update(priv, r, FIB_EVENT_ENTRY_REPLACE);
+ if (err) {
+ mlx5e_route_put_locked(priv, r);
+ return err;
+ }
+ }
+
+ flow->encap_routes[out_index].r = r;
+ if (new_encap_entry)
+ list_add(&e->route_list, &r->encap_entries);
+ flow->encap_routes[out_index].index = out_index;
+ return 0;
+}
+
+void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_route_entry *r = flow->decap_route;
+
+ if (!r)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ list_del(&flow->decap_routes);
+ flow->decap_route = NULL;
+
+ if (!refcount_dec_and_test(&r->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
+ }
+ hash_del_rcu(&r->hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_route_dealloc(priv, r);
+}
+
+static void mlx5e_detach_encap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ int out_index)
+{
+ struct mlx5e_route_entry *r = flow->encap_routes[out_index].r;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_encap_entry *e, *tmp;
+
+ if (!r)
+ return;
+
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ flow->encap_routes[out_index].r = NULL;
+
+ if (!refcount_dec_and_test(&r->refcnt)) {
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ return;
+ }
+ list_for_each_entry_safe(e, tmp, &r->encap_entries, route_list)
+ list_del_init(&e->route_list);
+ hash_del_rcu(&r->hlist);
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+ mlx5e_route_dealloc(priv, r);
+}
+
+static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
+ struct mlx5e_encap_entry *e,
+ struct list_head *encap_flows)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, encap_flows, tmp_list) {
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+
+ if (!mlx5e_is_offloaded_flow(flow))
+ continue;
+ esw_attr = attr->esw_attr;
+
+ if (flow_flag_test(flow, SLOW))
+ mlx5e_tc_unoffload_from_slow_path(esw, flow);
+ else
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+ mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+ attr->modify_hdr = NULL;
+
+ esw_attr->dests[flow->tmp_entry_index].flags &=
+ ~MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
+ }
+
+ e->flags |= MLX5_ENCAP_ENTRY_NO_ROUTE;
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
+ mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
+ e->pkt_reformat = NULL;
+ }
+}
+
+static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
+ struct net_device *tunnel_dev,
+ struct mlx5e_encap_entry *e,
+ struct list_head *encap_flows)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+ int err;
+
+ err = ip_tunnel_info_af(e->tun_info) == AF_INET ?
+ mlx5e_tc_tun_update_header_ipv4(priv, tunnel_dev, e) :
+ mlx5e_tc_tun_update_header_ipv6(priv, tunnel_dev, e);
+ if (err)
+ mlx5_core_warn(priv->mdev, "Failed to update encap header, %d", err);
+ e->flags &= ~MLX5_ENCAP_ENTRY_NO_ROUTE;
+
+ list_for_each_entry(flow, encap_flows, tmp_list) {
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+
+ if (flow_flag_test(flow, FAILED))
+ continue;
+
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+ spec = &parse_attr->spec;
+
+ err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
+ e->out_dev, e->route_dev_ifindex,
+ flow->tmp_entry_index);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update VF tunnel err=%d", err);
+ continue;
+ }
+
+ err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
+ err);
+ continue;
+ }
+
+ if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+ esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
+ esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
+ goto offload_to_slow_path;
+ /* update from slow path rule to encap rule */
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
+ err);
+ } else {
+ flow->rule[0] = rule;
+ }
+ } else {
+offload_to_slow_path:
+ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
+ /* mark the flow's encap dest as non-valid */
+ esw_attr->dests[flow->tmp_entry_index].flags &=
+ ~MLX5_ESW_DEST_ENCAP_VALID;
+
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
+ err);
+ } else {
+ flow->rule[0] = rule;
+ }
+ }
+ flow_flag_set(flow, OFFLOADED);
+ }
+}
+
+static int mlx5e_update_route_encaps(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r,
+ struct list_head *flow_list,
+ bool replace)
+{
+ struct net_device *tunnel_dev;
+ struct mlx5e_encap_entry *e;
+
+ tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index);
+ if (!tunnel_dev)
+ return -ENODEV;
+
+ list_for_each_entry(e, &r->encap_entries, route_list) {
+ LIST_HEAD(encap_flows);
+
+ mlx5e_take_all_encap_flows(e, &encap_flows);
+ if (list_empty(&encap_flows))
+ continue;
+
+ if (mlx5e_route_entry_valid(r))
+ mlx5e_invalidate_encap(priv, e, &encap_flows);
+
+ if (!replace) {
+ list_splice(&encap_flows, flow_list);
+ continue;
+ }
+
+ mlx5e_reoffload_encap(priv, tunnel_dev, e, &encap_flows);
+ list_splice(&encap_flows, flow_list);
+ }
+
+ return 0;
+}
+
+static void mlx5e_unoffload_flow_list(struct mlx5e_priv *priv,
+ struct list_head *flow_list)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, flow_list, tmp_list)
+ if (mlx5e_is_offloaded_flow(flow))
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+}
+
+static void mlx5e_reoffload_decap(struct mlx5e_priv *priv,
+ struct list_head *decap_flows)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *flow;
+
+ list_for_each_entry(flow, decap_flows, tmp_list) {
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ if (flow_flag_test(flow, FAILED))
+ continue;
+
+ parse_attr = attr->parse_attr;
+ spec = &parse_attr->spec;
+ err = mlx5e_tc_tun_route_lookup(priv, spec, attr);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "Failed to lookup route for flow, %d\n",
+ err);
+ continue;
+ }
+
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(priv->mdev, "Failed to update cached decap flow, %d\n",
+ err);
+ } else {
+ flow->rule[0] = rule;
+ flow_flag_set(flow, OFFLOADED);
+ }
+ }
+}
+
+static int mlx5e_update_route_decap_flows(struct mlx5e_priv *priv,
+ struct mlx5e_route_entry *r,
+ struct list_head *flow_list,
+ bool replace)
+{
+ struct net_device *tunnel_dev;
+ LIST_HEAD(decap_flows);
+
+ tunnel_dev = __dev_get_by_index(dev_net(priv->netdev), r->tunnel_dev_index);
+ if (!tunnel_dev)
+ return -ENODEV;
+
+ mlx5e_take_all_route_decap_flows(r, &decap_flows);
+ if (mlx5e_route_entry_valid(r))
+ mlx5e_unoffload_flow_list(priv, &decap_flows);
+ if (replace)
+ mlx5e_reoffload_decap(priv, &decap_flows);
+
+ list_splice(&decap_flows, flow_list);
+
+ return 0;
+}
+
+static void mlx5e_tc_fib_event_work(struct work_struct *work)
+{
+ struct mlx5e_tc_fib_event_data *event_data =
+ container_of(work, struct mlx5e_tc_fib_event_data, work);
+ struct net_device *ul_dev = event_data->ul_dev;
+ struct mlx5e_priv *priv = netdev_priv(ul_dev);
+ struct mlx5e_route_entry *r = event_data->r;
+ struct mlx5_eswitch *esw;
+ LIST_HEAD(flow_list);
+ bool replace;
+ int err;
+
+ /* sync with concurrent neigh updates */
+ rtnl_lock();
+ esw = priv->mdev->priv.eswitch;
+ mutex_lock(&esw->offloads.encap_tbl_lock);
+ replace = event_data->event == FIB_EVENT_ENTRY_REPLACE;
+
+ if (!mlx5e_route_entry_valid(r) && !replace)
+ goto out;
+
+ err = mlx5e_update_route_encaps(priv, r, &flow_list, replace);
+ if (err)
+ mlx5_core_warn(priv->mdev, "Failed to update route encaps, %d\n",
+ err);
+
+ err = mlx5e_update_route_decap_flows(priv, r, &flow_list, replace);
+ if (err)
+ mlx5_core_warn(priv->mdev, "Failed to update route decap flows, %d\n",
+ err);
+
+ if (replace)
+ r->flags |= MLX5E_ROUTE_ENTRY_VALID;
+out:
+ mutex_unlock(&esw->offloads.encap_tbl_lock);
+ rtnl_unlock();
+
+ mlx5e_put_flow_list(priv, &flow_list);
+ mlx5e_route_put(priv, event_data->r);
+ dev_put(event_data->ul_dev);
+ kfree(event_data);
+}
+
+static struct mlx5e_tc_fib_event_data *
+mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
+ struct net_device *ul_dev,
+ struct mlx5e_tc_tun_encap *encap,
+ unsigned long event,
+ struct fib_notifier_info *info)
+{
+ struct fib_entry_notifier_info *fen_info;
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ struct net_device *fib_dev;
+
+ fen_info = container_of(info, struct fib_entry_notifier_info, info);
+ fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
+ if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ fen_info->dst_len != 32)
+ return NULL;
+
+ fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC);
+ if (!fib_work)
+ return ERR_PTR(-ENOMEM);
+
+ key.endpoint_ip.v4 = htonl(fen_info->dst);
+ key.ip_version = 4;
+
+ /* Can't fail after this point because releasing reference to r
+ * requires obtaining sleeping mutex which we can't do in atomic
+ * context.
+ */
+ r = mlx5e_route_lookup_for_update(encap, &key);
+ if (!r)
+ goto out;
+ fib_work->r = r;
+ dev_hold(ul_dev);
+
+ return fib_work;
+
+out:
+ kfree(fib_work);
+ return NULL;
+}
+
+static struct mlx5e_tc_fib_event_data *
+mlx5e_init_fib_work_ipv6(struct mlx5e_priv *priv,
+ struct net_device *ul_dev,
+ struct mlx5e_tc_tun_encap *encap,
+ unsigned long event,
+ struct fib_notifier_info *info)
+{
+ struct fib6_entry_notifier_info *fen_info;
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct mlx5e_route_entry *r;
+ struct mlx5e_route_key key;
+ struct net_device *fib_dev;
+
+ fen_info = container_of(info, struct fib6_entry_notifier_info, info);
+ fib_dev = fib6_info_nh_dev(fen_info->rt);
+ if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ fen_info->rt->fib6_dst.plen != 128)
+ return NULL;
+
+ fib_work = mlx5e_tc_init_fib_work(event, ul_dev, GFP_ATOMIC);
+ if (!fib_work)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(&key.endpoint_ip.v6, &fen_info->rt->fib6_dst.addr,
+ sizeof(fen_info->rt->fib6_dst.addr));
+ key.ip_version = 6;
+
+ /* Can't fail after this point because releasing reference to r
+ * requires obtaining sleeping mutex which we can't do in atomic
+ * context.
+ */
+ r = mlx5e_route_lookup_for_update(encap, &key);
+ if (!r)
+ goto out;
+ fib_work->r = r;
+ dev_hold(ul_dev);
+
+ return fib_work;
+
+out:
+ kfree(fib_work);
+ return NULL;
+}
+
+static int mlx5e_tc_tun_fib_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct mlx5e_tc_fib_event_data *fib_work;
+ struct fib_notifier_info *info = ptr;
+ struct mlx5e_tc_tun_encap *encap;
+ struct net_device *ul_dev;
+ struct mlx5e_priv *priv;
+
+ encap = container_of(nb, struct mlx5e_tc_tun_encap, fib_nb);
+ priv = encap->priv;
+ ul_dev = priv->netdev;
+ priv = netdev_priv(ul_dev);
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_DEL:
+ if (info->family == AF_INET)
+ fib_work = mlx5e_init_fib_work_ipv4(priv, ul_dev, encap, event, info);
+ else if (info->family == AF_INET6)
+ fib_work = mlx5e_init_fib_work_ipv6(priv, ul_dev, encap, event, info);
+ else
+ return NOTIFY_DONE;
+
+ if (!IS_ERR_OR_NULL(fib_work)) {
+ queue_work(priv->wq, &fib_work->work);
+ } else if (IS_ERR(fib_work)) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Failed to init fib work");
+ mlx5_core_warn(priv->mdev, "Failed to init fib work, %ld\n",
+ PTR_ERR(fib_work));
+ }
+
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
+}
+
+struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_tun_encap *encap;
+ int err;
+
+ encap = kvzalloc(sizeof(*encap), GFP_KERNEL);
+ if (!encap)
+ return ERR_PTR(-ENOMEM);
+
+ encap->priv = priv;
+ encap->fib_nb.notifier_call = mlx5e_tc_tun_fib_event;
+ spin_lock_init(&encap->route_lock);
+ hash_init(encap->route_tbl);
+ err = register_fib_notifier(dev_net(priv->netdev), &encap->fib_nb,
+ NULL, NULL);
+ if (err) {
+ kvfree(encap);
+ return ERR_PTR(err);
+ }
+
+ return encap;
+}
+
+void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap)
+{
+ if (!encap)
+ return;
+
+ unregister_fib_notifier(dev_net(encap->priv->netdev), &encap->fib_nb);
+ flush_workqueue(encap->priv->wq); /* flush fib event works */
+ kvfree(encap);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
new file mode 100644
index 000000000000..3391504d9a08
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_TC_TUN_ENCAP_H__
+#define __MLX5_EN_TC_TUN_ENCAP_H__
+
+#include "tc_priv.h"
+
+void mlx5e_detach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow, int out_index);
+
+int mlx5e_attach_encap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct net_device *mirred_dev,
+ int out_index,
+ struct netlink_ext_ack *extack,
+ struct net_device **encap_dev,
+ bool *encap_valid);
+int mlx5e_attach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack);
+void mlx5e_detach_decap(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
+int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow);
+
+struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info);
+
+int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_spec *spec);
+
+struct mlx5e_tc_tun_encap *mlx5e_tc_tun_init(struct mlx5e_priv *priv);
+void mlx5e_tc_tun_cleanup(struct mlx5e_tc_tun_encap *encap);
+
+#endif /* __MLX5_EN_TC_TUN_ENCAP_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
index 1f9526244222..3479672e84cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_mplsoudp.c
@@ -81,8 +81,8 @@ static int parse_tunnel(struct mlx5e_priv *priv,
if (!enc_keyid.mask->keyid)
return 0;
- if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP))
+ if (!MLX5_CAP_ETH(priv->mdev, tunnel_stateless_mpls_over_udp) &&
+ !(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) & MLX5_FLEX_PROTO_CW_MPLS_UDP))
return -EOPNOTSUPP;
flow_rule_match_mpls(rule, &match);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
new file mode 100644
index 000000000000..37fc1d77ded7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies */
+
+#include <net/page_pool.h>
+#include "en/txrx.h"
+#include "en/params.h"
+#include "en/trap.h"
+
+static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_trap *trap_ctx = container_of(napi, struct mlx5e_trap, napi);
+ struct mlx5e_ch_stats *ch_stats = trap_ctx->stats;
+ struct mlx5e_rq *rq = &trap_ctx->rq;
+ bool busy = false;
+ int work_done = 0;
+
+ ch_stats->poll++;
+
+ work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
+ busy |= work_done == budget;
+ busy |= rq->post_wqes(rq);
+
+ if (busy)
+ return budget;
+
+ if (unlikely(!napi_complete_done(napi, work_done)))
+ return work_done;
+
+ mlx5e_cq_arm(&rq->cq);
+ return work_done;
+}
+
+static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
+ struct mlx5e_ch_stats *ch_stats,
+ struct mlx5e_rq *rq)
+{
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct page_pool_params pp_params = {};
+ int node = dev_to_node(mdev->device);
+ u32 pool_size;
+ int wq_sz;
+ int err;
+ int i;
+
+ rqp->wq.db_numa_node = node;
+
+ rq->wq_type = params->rq_wq_type;
+ rq->pdev = mdev->device;
+ rq->netdev = priv->netdev;
+ rq->mdev = mdev;
+ rq->priv = priv;
+ rq->stats = stats;
+ rq->clock = &mdev->clock;
+ rq->tstamp = &priv->tstamp;
+ rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+
+ xdp_rxq_info_unused(&rq->xdp_rxq);
+
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
+ pool_size = 1 << params->log_rq_mtu_frames;
+
+ err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
+
+ rq->wqe.info = rqp->frags_info;
+ rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
+ rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
+ (wq_sz << rq->wqe.info.log_num_frags)),
+ GFP_KERNEL, node);
+ if (!rq->wqe.frags) {
+ err = -ENOMEM;
+ goto err_wq_cyc_destroy;
+ }
+
+ err = mlx5e_init_di_list(rq, wq_sz, node);
+ if (err)
+ goto err_free_frags;
+
+ rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+
+ mlx5e_rq_set_trap_handlers(rq, params);
+
+ /* Create a page_pool and register it with rxq */
+ pp_params.order = 0;
+ pp_params.flags = 0; /* No internal DMA mapping in page_pool */
+ pp_params.pool_size = pool_size;
+ pp_params.nid = node;
+ pp_params.dev = mdev->device;
+ pp_params.dma_dir = rq->buff.map_dir;
+
+ /* page_pool can be used even when there is no rq->xdp_prog,
+ * given that page_pool does not handle DMA mapping there is no
+ * required state to clear. And page_pool gracefully handles
+ * elevated refcnt.
+ */
+ rq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rq->page_pool)) {
+ err = PTR_ERR(rq->page_pool);
+ rq->page_pool = NULL;
+ goto err_free_di_list;
+ }
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe_cyc *wqe =
+ mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
+ int f;
+
+ for (f = 0; f < rq->wqe.info.num_frags; f++) {
+ u32 frag_size = rq->wqe.info.arr[f].frag_size |
+ MLX5_HW_START_PADDING;
+
+ wqe->data[f].byte_count = cpu_to_be32(frag_size);
+ wqe->data[f].lkey = rq->mkey_be;
+ }
+ /* check if num_frags is not a pow of two */
+ if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
+ wqe->data[f].byte_count = 0;
+ wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ wqe->data[f].addr = 0;
+ }
+ }
+ return 0;
+
+err_free_di_list:
+ mlx5e_free_di_list(rq);
+err_free_frags:
+ kvfree(rq->wqe.frags);
+err_wq_cyc_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
+{
+ page_pool_destroy(rq->page_pool);
+ mlx5e_free_di_list(rq);
+ kvfree(rq->wqe.frags);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
+ struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param,
+ struct mlx5e_ch_stats *ch_stats,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_create_cq_param ccp = {};
+ struct dim_cq_moder trap_moder = {};
+ struct mlx5e_cq *cq = &rq->cq;
+ int err;
+
+ ccp.node = dev_to_node(mdev->device);
+ ccp.ch_stats = ch_stats;
+ ccp.napi = napi;
+ ccp.ix = 0;
+ err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5e_create_rq(rq, rq_param);
+ if (err)
+ goto err_free_rq;
+
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_destroy_rq;
+
+ return 0;
+
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+err_free_rq:
+ mlx5e_free_trap_rq(rq);
+err_destroy_cq:
+ mlx5e_close_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
+{
+ mlx5e_destroy_rq(rq);
+ mlx5e_free_rx_descs(rq);
+ mlx5e_free_trap_rq(rq);
+ mlx5e_close_cq(&rq->cq);
+}
+
+static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
+ u32 rqn)
+{
+ void *tirc;
+ int inlen;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn, rqn);
+ err = mlx5e_create_tir(mdev, tir, in);
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
+{
+ mlx5e_destroy_tir(mdev, tir);
+}
+
+static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
+{
+ set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+}
+
+static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+}
+
+static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
+{
+ struct mlx5e_params *params = &t->params;
+
+ params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
+ mlx5e_init_rq_type_params(priv->mdev, params);
+ params->sw_mtu = priv->netdev->max_mtu;
+ mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
+}
+
+static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
+{
+ int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
+ struct net_device *netdev = priv->netdev;
+ struct mlx5e_trap *t;
+ int err;
+
+ t = kvzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu));
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+
+ mlx5e_build_trap_params(priv, t);
+
+ t->priv = priv;
+ t->mdev = priv->mdev;
+ t->tstamp = &priv->tstamp;
+ t->pdev = mlx5_core_dma_dev(priv->mdev);
+ t->netdev = priv->netdev;
+ t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+ t->stats = &priv->trap_stats.ch;
+
+ netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
+
+ err = mlx5e_open_trap_rq(priv, &t->napi,
+ &priv->trap_stats.rq,
+ &t->params, &t->rq_param,
+ &priv->trap_stats.ch,
+ &t->rq);
+ if (unlikely(err))
+ goto err_napi_del;
+
+ err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn);
+ if (err)
+ goto err_close_trap_rq;
+
+ return t;
+
+err_close_trap_rq:
+ mlx5e_close_trap_rq(&t->rq);
+err_napi_del:
+ netif_napi_del(&t->napi);
+ kvfree(t);
+ return ERR_PTR(err);
+}
+
+void mlx5e_close_trap(struct mlx5e_trap *trap)
+{
+ mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
+ mlx5e_close_trap_rq(&trap->rq);
+ netif_napi_del(&trap->napi);
+ kvfree(trap);
+}
+
+static void mlx5e_activate_trap(struct mlx5e_trap *trap)
+{
+ napi_enable(&trap->napi);
+ mlx5e_activate_trap_rq(&trap->rq);
+ napi_schedule(&trap->napi);
+}
+
+void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
+{
+ struct mlx5e_trap *trap = priv->en_trap;
+
+ mlx5e_deactivate_trap_rq(&trap->rq);
+ napi_disable(&trap->napi);
+}
+
+static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
+{
+ struct mlx5e_trap *trap;
+
+ trap = mlx5e_open_trap(priv);
+ if (IS_ERR(trap))
+ goto out;
+
+ mlx5e_activate_trap(trap);
+out:
+ return trap;
+}
+
+static void mlx5e_del_trap_queue(struct mlx5e_priv *priv)
+{
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+}
+
+static int mlx5e_trap_get_tirn(struct mlx5e_trap *en_trap)
+{
+ return en_trap->tir.tirn;
+}
+
+static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
+{
+ bool open_queue = !priv->en_trap;
+ struct mlx5e_trap *trap;
+ int err;
+
+ if (open_queue) {
+ trap = mlx5e_add_trap_queue(priv);
+ if (IS_ERR(trap))
+ return PTR_ERR(trap);
+ priv->en_trap = trap;
+ }
+
+ switch (trap_id) {
+ case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
+ err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ if (err)
+ goto err_out;
+ break;
+ case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
+ err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
+ if (err)
+ goto err_out;
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
+ err = -EINVAL;
+ goto err_out;
+ }
+ return 0;
+
+err_out:
+ if (open_queue)
+ mlx5e_del_trap_queue(priv);
+ return err;
+}
+
+static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
+{
+ switch (trap_id) {
+ case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
+ mlx5e_remove_vlan_trap(priv);
+ break;
+ case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
+ mlx5e_remove_mac_trap(priv);
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
+ return -EINVAL;
+ }
+ if (priv->en_trap && !mlx5_devlink_trap_get_num_active(priv->mdev))
+ mlx5e_del_trap_queue(priv);
+
+ return 0;
+}
+
+int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx)
+{
+ int err = 0;
+
+ /* Traps are unarmed when interface is down, no need to update
+ * them. The configuration is saved in the core driver,
+ * queried and applied upon interface up operation in
+ * mlx5e_open_locked().
+ */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ switch (trap_ctx->action) {
+ case DEVLINK_TRAP_ACTION_TRAP:
+ err = mlx5e_handle_action_trap(priv, trap_ctx->id);
+ break;
+ case DEVLINK_TRAP_ACTION_DROP:
+ err = mlx5e_handle_action_drop(priv, trap_ctx->id);
+ break;
+ default:
+ netdev_warn(priv->netdev, "%s: Unsupported action %d\n", __func__,
+ trap_ctx->action);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int mlx5e_apply_trap(struct mlx5e_priv *priv, int trap_id, bool enable)
+{
+ enum devlink_trap_action action;
+ int err;
+
+ err = mlx5_devlink_traps_get_action(priv->mdev, trap_id, &action);
+ if (err)
+ return err;
+ if (action == DEVLINK_TRAP_ACTION_TRAP)
+ err = enable ? mlx5e_handle_action_trap(priv, trap_id) :
+ mlx5e_handle_action_drop(priv, trap_id);
+ return err;
+}
+
+static const int mlx5e_traps_arr[] = {
+ DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
+ DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
+};
+
+int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mlx5e_traps_arr); i++) {
+ err = mlx5e_apply_trap(priv, mlx5e_traps_arr[i], enable);
+ if (err)
+ return err;
+ }
+ return 0;
+}
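The helpers above are driven per trap id: mlx5e_handle_action_trap() opens and activates the shared trap RQ/TIR when the first trap is armed, and mlx5e_handle_action_drop() tears the queue down once mlx5_devlink_trap_get_num_active() reports no active traps left. A minimal sketch of feeding an action change into this code follows; it assumes only the mlx5_trap_ctx fields actually used here (id and action) and is illustrative, not part of the patch.

	/* Hedged sketch -- hypothetical caller, not driver code. */
	static int example_arm_vlan_trap(struct mlx5e_priv *priv)
	{
		struct mlx5_trap_ctx ctx = {
			.id     = DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
			.action = DEVLINK_TRAP_ACTION_TRAP,
		};

		/* Opens the shared trap RQ/TIR on the first armed trap and
		 * installs the VLAN trap steering rule pointing at its TIR.
		 */
		return mlx5e_handle_trap_event(priv, &ctx);
	}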
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
new file mode 100644
index 000000000000..aa3f17658c6d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies */
+
+#ifndef __MLX5E_TRAP_H__
+#define __MLX5E_TRAP_H__
+
+#include "../en.h"
+#include "../devlink.h"
+
+struct mlx5e_trap {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_tir tir;
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+
+ /* data path - accessed per napi poll */
+ struct mlx5e_ch_stats *stats;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ struct mlx5_core_dev *mdev;
+ struct hwtstamp_config *tstamp;
+ DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
+
+ struct mlx5e_params params;
+ struct mlx5e_rq_param rq_param;
+};
+
+void mlx5e_close_trap(struct mlx5e_trap *trap);
+void mlx5e_deactivate_trap(struct mlx5e_priv *priv);
+int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx);
+int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 4880f2179273..2371b83dad9c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -26,6 +26,13 @@
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
+static inline
+ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
+{
+ return INDIRECT_CALL_2(func, mlx5_real_time_cyc2time, mlx5_timecounter_cyc2time,
+ clock, cqe_ts);
+}
+
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index d487e5e37162..8d991c3b7a50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -83,7 +83,7 @@ static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
/* Let other device's napi(s) and XSK wakeups see our new state. */
- synchronize_rcu();
+ synchronize_net();
}
static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
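The synchronize_rcu() -> synchronize_net() conversions in this patch (here and in the RQ/SQ/ICOSQ deactivate paths below) keep the same guarantee -- NAPI and XSK wakeup code that already observed the enabled bit finishes before teardown -- while letting the wait be expedited when the caller holds RTNL. A generic sketch of the pattern, using a hypothetical state word and bit rather than driver symbols:

	#include <linux/bitops.h>
	#include <linux/netdevice.h>

	#define EXAMPLE_STATE_ENABLED 0	/* hypothetical bit */

	static void example_disable_datapath(unsigned long *state)
	{
		clear_bit(EXAMPLE_STATE_ENABLED, state);
		/* NAPI/XSK readers that saw the bit set are done once this
		 * returns; synchronize_net() expedites the grace period when
		 * RTNL is held.
		 */
		synchronize_net();
		/* HW queues and buffers may now be destroyed safely. */
	}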
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index d87c345878d3..f4bce1365639 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -111,7 +111,7 @@ err_free_cparam:
void mlx5e_close_xsk(struct mlx5e_channel *c)
{
clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
- synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */
+ synchronize_net(); /* Sync with the XSK wakeup and with NAPI. */
mlx5e_close_rq(&c->xskrq);
mlx5e_close_cq(&c->xskrq.cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1fae7fab8297..cc0efac7b812 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -85,7 +85,7 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ih
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
- if (skb_vlan_tag_present(skb) && ihs)
+ if (skb_vlan_tag_present(skb) && ihs)
mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
@@ -144,9 +144,9 @@ static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *sta
{
#ifdef CONFIG_MLX5_EN_IPSEC
return mlx5e_ipsec_is_tx_flow(&state->ipsec);
-#endif
-
+#else
return false;
+#endif
}
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
@@ -173,7 +173,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#endif
#if IS_ENABLED(CONFIG_GENEVE)
- if (skb->encapsulation)
+ if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index a9b45606dbdb..a97e8d205094 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -497,20 +497,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
}
}
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features)
-{
- struct sec_path *sp = skb_sec_path(skb);
- struct xfrm_state *x;
-
- if (sp && sp->len) {
- x = sp->xvec[0];
- if (x && x->xso.offload_handle)
- return true;
- }
- return false;
-}
-
void mlx5e_ipsec_build_inverse_table(void)
{
u16 mss_inv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 9df9b9a8e09b..3e80742a3caf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -57,8 +57,6 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb, u32 *cqe_bcnt);
void mlx5e_ipsec_inverse_table_init(void);
-bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
- netdev_features_t features);
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
@@ -87,8 +85,28 @@ static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ips
return ipsec_st->x;
}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+}
+
void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg);
+
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct sec_path *sp = skb_sec_path(skb);
+
+ if (sp && sp->len) {
+ struct xfrm_state *x = sp->xvec[0];
+
+ if (x && x->xso.offload_handle)
+ return true;
+ }
+ return false;
+}
+
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
@@ -96,7 +114,14 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct mlx5_cqe64 *cqe)
{}
+static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
+
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false; }
+static inline bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev,
+ netdev_features_t features) { return false; }
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
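Making mlx5e_ipsec_feature_check() a static inline with a false-returning stub for !CONFIG_MLX5_EN_IPSEC lets callers drop their preprocessor guards; the en_main.c hunk later in this patch removes exactly such a guard in mlx5e_features_check(). A simplified sketch of a call site after the change (the fallback shown is illustrative, not the full driver logic):

	#include <linux/if_vlan.h>
	#include <linux/netdevice.h>

	static netdev_features_t example_features_check(struct sk_buff *skb,
							struct net_device *netdev,
							netdev_features_t features)
	{
		/* Compiles to a plain 'return false' when IPsec offload is
		 * disabled, so no #ifdef CONFIG_MLX5_EN_IPSEC is needed here.
		 */
		if (mlx5e_ipsec_feature_check(skb, netdev, features))
			return features;

		return vlan_features_check(skb, features);
	}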
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 1b392696280d..95293ee0d38d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -2,6 +2,7 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "en.h"
+#include "en_accel/tls.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
@@ -86,16 +87,33 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable)
int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
- int err = 0;
+ int err;
- if (priv->netdev->features & NETIF_F_HW_TLS_RX)
+ if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ return 0;
+
+ priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
+ if (!priv->tls->rx_wq)
+ return -ENOMEM;
+
+ if (priv->netdev->features & NETIF_F_HW_TLS_RX) {
err = mlx5e_accel_fs_tcp_create(priv);
+ if (err) {
+ destroy_workqueue(priv->tls->rx_wq);
+ return err;
+ }
+ }
- return err;
+ return 0;
}
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
+ if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ return;
+
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
mlx5e_accel_fs_tcp_destroy(priv);
+
+ destroy_workqueue(priv->tls->rx_wq);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 6a1d82503ef8..d06532d0baa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -57,6 +57,20 @@ struct mlx5e_ktls_offload_context_rx {
struct mlx5e_ktls_rx_resync_ctx resync;
};
+static bool mlx5e_ktls_priv_rx_put(struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+ if (!refcount_dec_and_test(&priv_rx->resync.refcnt))
+ return false;
+
+ kfree(priv_rx);
+ return true;
+}
+
+static void mlx5e_ktls_priv_rx_get(struct mlx5e_ktls_offload_context_rx *priv_rx)
+{
+ refcount_inc(&priv_rx->resync.refcnt);
+}
+
static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn)
{
int err, inlen;
@@ -326,7 +340,7 @@ static void resync_handle_work(struct work_struct *work)
priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
return;
}
@@ -334,7 +348,7 @@ static void resync_handle_work(struct work_struct *work)
sq = &c->async_icosq;
if (resync_post_get_progress_params(sq, priv_rx))
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
}
static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -377,7 +391,11 @@ unlock:
return err;
}
-/* Function is called with elevated refcount, it decreases it. */
+/* Function can be called with the refcount being either elevated or not.
+ * It decreases the refcount and may free the kTLS priv context.
+ * Refcount is not elevated only if tls_dev_del has been called, but GET_PSV was
+ * already in flight.
+ */
void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
struct mlx5e_icosq *sq)
{
@@ -410,7 +428,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
priv_rx->stats->tls_resync_req_end++;
out:
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
kfree(buf);
}
@@ -431,9 +449,9 @@ static bool resync_queue_get_psv(struct sock *sk)
return false;
resync = &priv_rx->resync;
- refcount_inc(&resync->refcnt);
+ mlx5e_ktls_priv_rx_get(priv_rx);
if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
- refcount_dec(&resync->refcnt);
+ mlx5e_ktls_priv_rx_put(priv_rx);
return true;
}
@@ -625,31 +643,6 @@ err_create_key:
return err;
}
-/* Elevated refcount on the resync object means there are
- * outstanding operations (uncompleted GET_PSV WQEs) that
- * will read the resync / priv_rx objects once completed.
- * Wait for them to avoid use-after-free.
- */
-static void wait_for_resync(struct net_device *netdev,
- struct mlx5e_ktls_rx_resync_ctx *resync)
-{
-#define MLX5E_KTLS_RX_RESYNC_TIMEOUT 20000 /* msecs */
- unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5E_KTLS_RX_RESYNC_TIMEOUT);
- unsigned int refcnt;
-
- do {
- refcnt = refcount_read(&resync->refcnt);
- if (refcnt == 1)
- return;
-
- msleep(20);
- } while (time_before(jiffies, exp_time));
-
- netdev_warn(netdev,
- "Failed waiting for kTLS RX resync refcnt to be released (%u).\n",
- refcnt);
-}
-
void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -663,7 +656,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL);
- synchronize_rcu(); /* Sync with NAPI */
+ synchronize_net(); /* Sync with NAPI */
if (!cancel_work_sync(&priv_rx->rule.work))
/* completion is needed, as the priv_rx in the add flow
* is maintained on the wqe info (wi), not on the socket.
@@ -671,8 +664,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
wait_for_completion(&priv_rx->add_ctx);
resync = &priv_rx->resync;
if (cancel_work_sync(&resync->work))
- refcount_dec(&resync->refcnt);
- wait_for_resync(netdev, resync);
+ mlx5e_ktls_priv_rx_put(priv_rx);
priv_rx->stats->tls_del++;
if (priv_rx->rule.rule)
@@ -680,5 +672,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
mlx5_core_destroy_tir(mdev, priv_rx->tirn);
mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
- kfree(priv_rx);
+ /* priv_rx should normally be freed here, but if there is an outstanding
+ * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
+ * processed.
+ */
+ mlx5e_ktls_priv_rx_put(priv_rx);
}
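The new get/put helpers replace the removed wait_for_resync() polling: the socket owns the base reference, every queued resync work item or in-flight GET_PSV takes an extra one, and whichever path drops the last reference frees priv_rx. The same pattern in a self-contained form, with generic names standing in for the driver structures (a sketch, not driver code):

	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct example_ctx {
		refcount_t refcnt;
		/* ... offload state ... */
	};

	static struct example_ctx *example_ctx_alloc(void)
	{
		struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (ctx)
			refcount_set(&ctx->refcnt, 1);	/* base reference */
		return ctx;
	}

	static void example_ctx_get(struct example_ctx *ctx)
	{
		refcount_inc(&ctx->refcnt);	/* before posting async work */
	}

	static bool example_ctx_put(struct example_ctx *ctx)
	{
		if (!refcount_dec_and_test(&ctx->refcnt))
			return false;		/* a completion still holds it */
		kfree(ctx);
		return true;
	}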
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index fee991f5ee7c..d6b21b899dbc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -231,12 +231,6 @@ int mlx5e_tls_init(struct mlx5e_priv *priv)
if (!tls)
return -ENOMEM;
- tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
- if (!tls->rx_wq) {
- kfree(tls);
- return -ENOMEM;
- }
-
priv->tls = tls;
return 0;
}
@@ -248,7 +242,6 @@ void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
if (!tls)
return;
- destroy_workqueue(tls->rx_wq);
kfree(tls);
priv->tls = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 302001d6661e..abdf721bb264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -447,6 +447,17 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
goto out;
}
+ /* Don't allow changing the number of channels if HTB offload is active,
+ * because the numeration of the QoS SQs will change, while per-queue
+ * qdiscs are attached.
+ */
+ if (priv->htb.maj_id) {
+ err = -EINVAL;
+ netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the number of channels\n",
+ __func__);
+ goto out;
+ }
+
new_channels.params = *cur_params;
new_channels.params.num_channels = count;
@@ -525,7 +536,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
static void
-mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
{
struct mlx5_core_dev *mdev = priv->mdev;
int tc;
@@ -540,6 +551,17 @@ mlx5e_set_priv_channels_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesc
coal->tx_coalesce_usecs,
coal->tx_max_coalesced_frames);
}
+ }
+}
+
+static void
+mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int i;
+
+ for (i = 0; i < priv->channels.num; ++i) {
+ struct mlx5e_channel *c = priv->channels.c[i];
mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
coal->rx_coalesce_usecs,
@@ -586,21 +608,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
tx_moder->pkts = coal->tx_max_coalesced_frames;
new_channels.params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- priv->channels.params = new_channels.params;
- goto out;
- }
- /* we are opened */
-
reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
- if (!reset_rx && !reset_tx) {
- mlx5e_set_priv_channels_coalesce(priv, coal);
- priv->channels.params = new_channels.params;
- goto out;
- }
-
if (reset_rx) {
u8 mode = MLX5E_GET_PFLAG(&new_channels.params,
MLX5E_PFLAG_RX_CQE_BASED_MODER);
@@ -614,6 +624,20 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
mlx5e_reset_tx_moderation(&new_channels.params, mode);
}
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
+ if (!reset_rx && !reset_tx) {
+ if (!coal->use_adaptive_rx_coalesce)
+ mlx5e_set_priv_channels_rx_coalesce(priv, coal);
+ if (!coal->use_adaptive_tx_coalesce)
+ mlx5e_set_priv_channels_tx_coalesce(priv, coal);
+ priv->channels.params = new_channels.params;
+ goto out;
+ }
+
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
out:
@@ -1972,6 +1996,16 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
if (!MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
return -EOPNOTSUPP;
+ /* Don't allow changing the PTP state if HTB offload is active, because
+ * the numeration of the QoS SQs will change, while per-queue qdiscs are
+ * attached.
+ */
+ if (priv->htb.maj_id) {
+ netdev_err(priv->netdev, "%s: HTB offload is active, cannot change the PTP state\n",
+ __func__);
+ return -EINVAL;
+ }
+
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_TX_PORT_TS, enable);
/* No need to verify SQ stop room as
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index e02e5895703d..16ce7756ac43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -46,7 +46,6 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
enum {
MLX5E_FULLMATCH = 0,
MLX5E_ALLMULTI = 1,
- MLX5E_PROMISC = 2,
};
enum {
@@ -306,6 +305,79 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
+static struct mlx5_flow_handle *
+mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
+{
+ struct mlx5_flow_destination dest = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+ spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
+ spec->flow_context.flow_tag = trap_id;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tir_num;
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ kvfree(spec);
+ return rule;
+}
+
+int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+{
+ struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ priv->fs.vlan.trap_rule = NULL;
+ netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
+ __func__, err);
+ return err;
+ }
+ priv->fs.vlan.trap_rule = rule;
+ return 0;
+}
+
+void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
+{
+ if (priv->fs.vlan.trap_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
+ priv->fs.vlan.trap_rule = NULL;
+ }
+}
+
+int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
+{
+ struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_handle *rule;
+ int err;
+
+ rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ priv->fs.l2.trap_rule = NULL;
+ netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
+ __func__, err);
+ return err;
+ }
+ priv->fs.l2.trap_rule = rule;
+ return 0;
+}
+
+void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
+{
+ if (priv->fs.l2.trap_rule) {
+ mlx5_del_flow_rules(priv->fs.l2.trap_rule);
+ priv->fs.l2.trap_rule = NULL;
+ }
+}
+
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
if (!priv->fs.vlan.cvlan_filter_disabled)
@@ -419,6 +491,8 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
+ mlx5e_remove_vlan_trap(priv);
+
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
@@ -596,6 +670,83 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_apply_netdev_addr(priv);
}
+#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
+#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
+
+static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_handle **rule_p;
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.ttc.ft.t;
+
+ rule_p = &priv->fs.promisc.rule;
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(*rule_p)) {
+ err = PTR_ERR(*rule_p);
+ *rule_p = NULL;
+ netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
+ }
+ kvfree(spec);
+ return err;
+}
+
+static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
+ struct mlx5_flow_table_attr ft_attr = {};
+ int err;
+
+ ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
+ ft_attr.prio = MLX5E_NIC_PRIO;
+
+ ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
+ return err;
+ }
+
+ err = mlx5e_add_promisc_rule(priv);
+ if (err)
+ goto err_destroy_promisc_table;
+
+ return 0;
+
+err_destroy_promisc_table:
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+
+ return err;
+}
+
+static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
+{
+ if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
+ return;
+ mlx5_del_flow_rules(priv->fs.promisc.rule);
+ priv->fs.promisc.rule = NULL;
+}
+
+static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
+{
+ if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
+ return;
+ mlx5e_del_promisc_rule(priv);
+ mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
+ priv->fs.promisc.ft.t = NULL;
+}
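Promiscuous mode now uses a dedicated flow table of a single entry in one autogroup (MLX5E_PROMISC_TABLE_SIZE is BIT(0)), whose only rule forwards all traffic to the TTC table; this replaces the old MLX5E_PROMISC entry in the L2 table plus the any-VID VLAN rules. mlx5e_set_rx_mode_work() below creates the table when promiscuous mode is enabled and destroys it on disable.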
+
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
@@ -615,14 +766,15 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+ int err;
if (enable_promisc) {
- if (!priv->channels.params.vlan_strip_disable)
+ err = mlx5e_create_promisc_table(priv);
+ if (err)
+ enable_promisc = false;
+ if (!priv->channels.params.vlan_strip_disable && !err)
netdev_warn_once(ndev,
"S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
- mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->fs.vlan.cvlan_filter_disabled)
- mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
@@ -635,11 +787,8 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
- if (disable_promisc) {
- if (!priv->fs.vlan.cvlan_filter_disabled)
- mlx5e_del_any_vid_rules(priv);
- mlx5e_del_l2_flow_rule(priv, &ea->promisc);
- }
+ if (disable_promisc)
+ mlx5e_destroy_promisc_table(priv);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
@@ -1306,9 +1455,6 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
-
- case MLX5E_PROMISC:
- break;
}
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
@@ -1325,12 +1471,12 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
}
#define MLX5E_NUM_L2_GROUPS 3
-#define MLX5E_L2_GROUP1_SIZE BIT(0)
-#define MLX5E_L2_GROUP2_SIZE BIT(15)
-#define MLX5E_L2_GROUP3_SIZE BIT(0)
+#define MLX5E_L2_GROUP1_SIZE BIT(15)
+#define MLX5E_L2_GROUP2_SIZE BIT(0)
+#define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
MLX5E_L2_GROUP2_SIZE +\
- MLX5E_L2_GROUP3_SIZE)
+ MLX5E_L2_GROUP_TRAP_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1353,7 +1499,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers.dmac_47_16);
- /* Flow Group for promiscuous */
+ /* Flow Group for full match */
+ eth_broadcast_addr(mc_dmac);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1362,9 +1510,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
- /* Flow Group for full match */
- eth_broadcast_addr(mc_dmac);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ /* Flow Group for allmulti */
+ eth_zero_addr(mc_dmac);
+ mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1373,11 +1521,10 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
- /* Flow Group for allmulti */
- eth_zero_addr(mc_dmac);
- mc_dmac[0] = 0x01;
+ /* Flow Group for l2 traps */
+ memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_L2_GROUP3_SIZE;
+ ix += MLX5E_L2_GROUP_TRAP_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
@@ -1435,15 +1582,17 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 4
+#define MLX5E_NUM_VLAN_GROUPS 5
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
+#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE +\
MLX5E_VLAN_GROUP2_SIZE +\
- MLX5E_VLAN_GROUP3_SIZE)
+ MLX5E_VLAN_GROUP3_SIZE +\
+ MLX5E_VLAN_GROUP_TRAP_SIZE)
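With the trap groups appended as the final groups (per the "must be last" comments), the VLAN table now holds 4096 + 4096 + 2 + 1 + 1 = 8196 flow table entries across five groups, and the L2 table holds 32768 + 1 + 1 = 32770 entries across its three.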
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
@@ -1498,6 +1647,15 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 3fc7d18ac868..ec2fcb2a2977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -65,6 +65,9 @@
#include "en/devlink.h"
#include "lib/mlx5.h"
#include "en/ptp.h"
+#include "qos.h"
+#include "en/trap.h"
+#include "fpga/ipsec.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
@@ -106,7 +109,7 @@ bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev,
if (!mlx5e_check_fragmented_striding_rq_cap(mdev))
return false;
- if (MLX5_IPSEC_DEV(mdev))
+ if (mlx5_fpga_is_ipsec_device(mdev))
return false;
if (params->xdp_prog) {
@@ -211,6 +214,33 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
+static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
+ int err;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_TYPE_TRAP:
+ err = mlx5e_handle_trap_event(priv, data);
+ break;
+ default:
+ netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
+{
+ priv->blocking_events_nb.notifier_call = blocking_event;
+ mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
+}
+
+static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
+{
+ mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
+}
+
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
@@ -342,13 +372,11 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
-static int mlx5e_init_di_list(struct mlx5e_rq *rq,
- int wq_sz, int cpu)
+int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
- rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
- GFP_KERNEL, cpu_to_node(cpu));
+ rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
if (!rq->wqe.di)
return -ENOMEM;
@@ -357,7 +385,7 @@ static int mlx5e_init_di_list(struct mlx5e_rq *rq,
return 0;
}
-static void mlx5e_free_di_list(struct mlx5e_rq *rq)
+void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
kvfree(rq->wqe.di);
}
@@ -422,6 +450,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
rq->xsk_pool = xsk_pool;
+ rq->ptp_cyc2time = mlx5_is_real_time_rq(mdev) ?
+ mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
if (rq->xsk_pool)
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
@@ -499,7 +530,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
+ err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
if (err)
goto err_rq_frags;
@@ -650,11 +681,10 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
-static int mlx5e_create_rq(struct mlx5e_rq *rq,
- struct mlx5e_rq_param *param)
+int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = rq->mdev;
-
+ u8 ts_format;
void *in;
void *rqc;
void *wq;
@@ -667,6 +697,9 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
if (!in)
return -ENOMEM;
+ ts_format = mlx5_is_real_time_rq(mdev) ?
+ MLX5_RQC_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
@@ -674,6 +707,7 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, ts_format, ts_format);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -774,7 +808,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
return err;
}
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
@@ -914,7 +948,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq)
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
+ synchronize_net(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */
}
void mlx5e_close_rq(struct mlx5e_rq *rq)
@@ -1143,7 +1177,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
@@ -1154,6 +1187,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (param->is_mpw)
set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
sq->stop_room = param->stop_room;
+ sq->ptp_cyc2time = mlx5_is_real_time_sq(mdev) ?
+ mlx5_real_time_cyc2time :
+ mlx5_timecounter_cyc2time;
param->wq.db_numa_node = cpu_to_node(c->cpu);
err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1187,6 +1223,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
struct mlx5e_create_sq_param *csp,
u32 *sqn)
{
+ u8 ts_format;
void *in;
void *sqc;
void *wq;
@@ -1199,6 +1236,9 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
if (!in)
return -ENOMEM;
+ ts_format = mlx5_is_real_time_sq(mdev) ?
+ MLX5_SQC_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -1207,6 +1247,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
MLX5_SET(sqc, sqc, tis_num_0, csp->tisn);
MLX5_SET(sqc, sqc, cqn, csp->cqn);
MLX5_SET(sqc, sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);
@@ -1233,6 +1275,7 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p)
{
+ u64 bitmask = 0;
void *in;
void *sqc;
int inlen;
@@ -1248,9 +1291,14 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
MLX5_SET(sqc, sqc, state, p->next_state);
if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
- MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
- MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
+ bitmask |= 1;
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, p->rl_index);
+ }
+ if (p->qos_update && p->next_state == MLX5_SQC_STATE_RDY) {
+ bitmask |= 1 << 2;
+ MLX5_SET(sqc, sqc, qos_queue_group_id, p->qos_queue_group_id);
}
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, bitmask);
err = mlx5_core_modify_sq(mdev, sqn, in);
@@ -1267,6 +1315,7 @@ static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
struct mlx5e_sq_param *param,
struct mlx5e_create_sq_param *csp,
+ u16 qos_queue_group_id,
u32 *sqn)
{
struct mlx5e_modify_sq_param msp = {0};
@@ -1278,6 +1327,10 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
msp.curr_state = MLX5_SQC_STATE_RST;
msp.next_state = MLX5_SQC_STATE_RDY;
+ if (qos_queue_group_id) {
+ msp.qos_update = true;
+ msp.qos_queue_group_id = qos_queue_group_id;
+ }
err = mlx5e_modify_sq(mdev, *sqn, &msp);
if (err)
mlx5e_destroy_sq(mdev, *sqn);
@@ -1288,13 +1341,9 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
static int mlx5e_set_sq_maxrate(struct net_device *dev,
struct mlx5e_txqsq *sq, u32 rate);
-static int mlx5e_open_txqsq(struct mlx5e_channel *c,
- u32 tisn,
- int txq_ix,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq,
- int tc)
+int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
+ struct mlx5e_params *params, struct mlx5e_sq_param *param,
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1304,12 +1353,17 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
if (err)
return err;
+ if (qos_queue_group_id)
+ sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
+ else
+ sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+
csp.tisn = tisn;
csp.tis_lst_sz = 1;
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, qos_queue_group_id, &sq->sqn);
if (err)
goto err_free_txqsq;
@@ -1348,7 +1402,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_wq_cyc *wq = &sq->wq;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
+ synchronize_net(); /* Sync with NAPI to prevent netif_tx_wake_queue. */
mlx5e_tx_disable_queue(sq->txq);
@@ -1366,7 +1420,7 @@ void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
}
}
-static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
+void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
@@ -1403,7 +1457,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.cqn = sq->cq.mcq.cqn;
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = params->tx_min_inline_mode;
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_icosq;
@@ -1423,7 +1477,7 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq)
void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq)
{
clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state);
- synchronize_rcu(); /* Sync with NAPI. */
+ synchronize_net(); /* Sync with NAPI. */
}
void mlx5e_close_icosq(struct mlx5e_icosq *sq)
@@ -1452,7 +1506,7 @@ int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
csp.wq_ctrl = &sq->wq_ctrl;
csp.min_inline_mode = sq->min_inline_mode;
set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
+ err = mlx5e_create_sq_rdy(c->mdev, param, &csp, 0, &sq->sqn);
if (err)
goto err_free_xdpsq;
@@ -1502,7 +1556,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
struct mlx5e_channel *c = sq->channel;
clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
- synchronize_rcu(); /* Sync with NAPI. */
+ synchronize_net(); /* Sync with NAPI. */
mlx5e_destroy_sq(c->mdev, sq->sqn);
mlx5e_free_xdpsq_descs(sq);
@@ -1703,7 +1757,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
int txq_ix = c->ix + tc * params->num_channels;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc);
+ params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
if (err)
goto err_close_sqs;
}
@@ -1826,12 +1880,12 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
mlx5e_build_create_cq_param(&ccp, c);
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
&c->async_icosq.cq);
if (err)
return err;
- err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
+ err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
&c->icosq.cq);
if (err)
goto err_close_async_icosq_cq;
@@ -1855,13 +1909,11 @@ static int mlx5e_open_queues(struct mlx5e_channel *c,
if (err)
goto err_close_rx_cq;
- napi_enable(&c->napi);
-
spin_lock_init(&c->async_icosq_lock);
err = mlx5e_open_icosq(c, params, &cparam->async_icosq, &c->async_icosq);
if (err)
- goto err_disable_napi;
+ goto err_close_xdpsq_cq;
err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
if (err)
@@ -1904,9 +1956,7 @@ err_close_icosq:
err_close_async_icosq:
mlx5e_close_icosq(&c->async_icosq);
-err_disable_napi:
- napi_disable(&c->napi);
-
+err_close_xdpsq_cq:
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
@@ -1937,7 +1987,6 @@ static void mlx5e_close_queues(struct mlx5e_channel *c)
mlx5e_close_sqs(c);
mlx5e_close_icosq(&c->icosq);
mlx5e_close_icosq(&c->async_icosq);
- napi_disable(&c->napi);
if (c->xdp)
mlx5e_close_cq(&c->rq_xdpsq.cq);
mlx5e_close_cq(&c->rq.cq);
@@ -2022,6 +2071,8 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
int tc;
+ napi_enable(&c->napi);
+
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_activate_txqsq(&c->sq[tc]);
mlx5e_activate_icosq(&c->icosq);
@@ -2044,6 +2095,9 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
mlx5e_deactivate_icosq(&c->icosq);
for (tc = 0; tc < c->num_tc; tc++)
mlx5e_deactivate_txqsq(&c->sq[tc]);
+ mlx5e_qos_deactivate_queues(c);
+
+ napi_disable(&c->napi);
}
static void mlx5e_close_channel(struct mlx5e_channel *c)
@@ -2051,6 +2105,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
mlx5e_close_xsk(c);
mlx5e_close_queues(c);
+ mlx5e_qos_close_queues(c);
netif_napi_del(&c->napi);
kvfree(c);
@@ -2068,10 +2123,8 @@ static void mlx5e_build_rq_frags_info(struct mlx5_core_dev *mdev,
u32 buf_size = 0;
int i;
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (MLX5_IPSEC_DEV(mdev))
+ if (mlx5_fpga_is_ipsec_device(mdev))
byte_count += MLX5E_METADATA_ETHER_LEN;
-#endif
if (mlx5e_rx_is_linear_skb(params, xsk)) {
int frag_stride;
@@ -2200,9 +2253,8 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
- struct mlx5e_params *params,
- struct mlx5e_sq_param *param)
+void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_params *params,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
@@ -2381,10 +2433,18 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
+ err = mlx5e_qos_open_queues(priv, chs);
+ if (err)
+ goto err_close_ptp;
+
mlx5e_health_channels_update(priv);
kvfree(cparam);
return 0;
+err_close_ptp:
+ if (chs->port_ptp)
+ mlx5e_port_ptp_close(chs->port_ptp);
+
err_close_channels:
for (i--; i >= 0; i--)
mlx5e_close_channel(chs->c[i]);
@@ -2917,11 +2977,31 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc)
netdev_set_tc_queue(netdev, tc, nch, 0);
}
+int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
+{
+ int qos_queues, nch, ntc, num_txqs, err;
+
+ qos_queues = mlx5e_qos_cur_leaf_nodes(priv);
+
+ nch = priv->channels.params.num_channels;
+ ntc = priv->channels.params.num_tc;
+ num_txqs = nch * ntc + qos_queues;
+ if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
+ num_txqs += ntc;
+
+ mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
+ err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
+ if (err)
+ netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+
+ return err;
+}
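As a worked example, with 16 channels, 2 TCs, 5 open HTB leaf queues and the TX_PORT_TS pflag set, this registers 16 * 2 + 5 + 2 = 39 real TX queues.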
+
static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
{
struct net_device *netdev = priv->netdev;
- int num_txqs, num_rxqs, nch, ntc;
int old_num_txqs, old_ntc;
+ int num_rxqs, nch, ntc;
int err;
old_num_txqs = netdev->real_num_tx_queues;
@@ -2929,18 +3009,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
- num_txqs = nch * ntc;
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
- num_txqs += ntc;
num_rxqs = nch * priv->profile->rq_groups;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
- err = netif_set_real_num_tx_queues(netdev, num_txqs);
- if (err) {
- netdev_warn(netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
+ err = mlx5e_update_tx_netdev_queues(priv);
+ if (err)
goto err_tcs;
- }
err = netif_set_real_num_rx_queues(netdev, num_rxqs);
if (err) {
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
@@ -3044,6 +3119,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_update_num_tc_x_num_ch(priv);
mlx5e_build_txq_maps(priv);
mlx5e_activate_channels(&priv->channels);
+ mlx5e_qos_activate_queues(priv);
mlx5e_xdp_tx_enable(priv);
netif_tx_start_all_queues(priv->netdev);
@@ -3186,6 +3262,7 @@ int mlx5e_open_locked(struct net_device *netdev)
priv->profile->update_rx(priv);
mlx5e_activate_priv_channels(priv);
+ mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
@@ -3221,6 +3298,7 @@ int mlx5e_close_locked(struct net_device *netdev)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
+ mlx5e_apply_traps(priv, false);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
@@ -3610,6 +3688,14 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
+ /* MQPRIO is another toplevel qdisc that can't be attached
+ * simultaneously with the offloaded HTB.
+ */
+ if (WARN_ON(priv->htb.maj_id)) {
+ err = -EINVAL;
+ goto out;
+ }
+
new_channels.params = priv->channels.params;
new_channels.params.num_tc = tc ? tc : 1;
@@ -3635,12 +3721,55 @@ out:
return err;
}
+static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb)
+{
+ int res;
+
+ switch (htb->command) {
+ case TC_HTB_CREATE:
+ return mlx5e_htb_root_add(priv, htb->parent_classid, htb->classid,
+ htb->extack);
+ case TC_HTB_DESTROY:
+ return mlx5e_htb_root_del(priv);
+ case TC_HTB_LEAF_ALLOC_QUEUE:
+ res = mlx5e_htb_leaf_alloc_queue(priv, htb->classid, htb->parent_classid,
+ htb->rate, htb->ceil, htb->extack);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ case TC_HTB_LEAF_TO_INNER:
+ return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
+ htb->rate, htb->ceil, htb->extack);
+ case TC_HTB_LEAF_DEL:
+ return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
+ htb->extack);
+ case TC_HTB_LEAF_DEL_LAST:
+ case TC_HTB_LEAF_DEL_LAST_FORCE:
+ return mlx5e_htb_leaf_del_last(priv, htb->classid,
+ htb->command == TC_HTB_LEAF_DEL_LAST_FORCE,
+ htb->extack);
+ case TC_HTB_NODE_MODIFY:
+ return mlx5e_htb_node_modify(priv, htb->classid, htb->rate, htb->ceil,
+ htb->extack);
+ case TC_HTB_LEAF_QUERY_QUEUE:
+ res = mlx5e_get_txq_by_classid(priv, htb->classid);
+ if (res < 0)
+ return res;
+ htb->qid = res;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
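The TC_HTB_LEAF_ALLOC_QUEUE and TC_HTB_LEAF_QUERY_QUEUE commands report the chosen QoS SQ back through htb->qid, which is how the qdisc layer learns which txq a leaf class owns; from user space the hierarchy is built with the usual tc htb commands, presumably with the qdisc's offload flag set so this ndo path is taken instead of the software HTB.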
+
static LIST_HEAD(mlx5e_block_cb_list);
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
switch (type) {
case TC_SETUP_BLOCK: {
@@ -3654,6 +3783,11 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(priv, type_data);
+ case TC_SETUP_QDISC_HTB:
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_setup_tc_htb(priv, type_data);
+ mutex_unlock(&priv->state_lock);
+ return err;
default:
return -EOPNOTSUPP;
}
@@ -3769,7 +3903,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
if (enable && priv->xsk.refcnt) {
- netdev_warn(netdev, "LRO is incompatible with AF_XDP (%hu XSKs are active)\n",
+ netdev_warn(netdev, "LRO is incompatible with AF_XDP (%u XSKs are active)\n",
priv->xsk.refcnt);
err = -EINVAL;
goto out;
@@ -3823,20 +3957,25 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
return 0;
}
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+static int set_feature_hw_tc(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
if (!enable && mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD))) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
+#endif
+
+ if (!enable && priv->htb.maj_id) {
+ netdev_err(netdev, "Active HTB offload, can't turn hw_tc_offload off\n");
+ return -EINVAL;
+ }
return 0;
}
-#endif
static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
@@ -3934,9 +4073,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
-#endif
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3969,6 +4106,7 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
if (!params->vlan_strip_disable)
netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
}
+
if (!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ)) {
if (features & NETIF_F_LRO) {
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
@@ -4016,7 +4154,7 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
max_mtu_page = mlx5e_xdp_max_mtu(new_params, &xsk);
max_mtu = min(max_mtu_frame, max_mtu_page);
- netdev_err(netdev, "MTU %d is too big for an XSK running on channel %hu. Try MTU <= %d\n",
+ netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n",
new_params->sw_mtu, ix, max_mtu);
return false;
}
@@ -4393,10 +4531,8 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
features = vxlan_features_check(skb, features);
-#ifdef CONFIG_MLX5_EN_IPSEC
if (mlx5e_ipsec_feature_check(skb, netdev, features))
return features;
-#endif
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -4455,8 +4591,9 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
return -EINVAL;
}
- if (MLX5_IPSEC_DEV(priv->mdev)) {
- netdev_warn(netdev, "can't set XDP with IPSec offload\n");
+ if (mlx5_fpga_is_ipsec_device(priv->mdev)) {
+ netdev_warn(netdev,
+ "XDP is not available on Innova cards with IPsec support\n");
return -EINVAL;
}
@@ -4639,8 +4776,6 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_change_mtu = mlx5e_change_nic_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
@@ -4808,15 +4943,15 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
tirc_default_config[tt].rx_hash_fields;
}
-void mlx5e_build_nic_params(struct mlx5e_priv *priv,
- struct mlx5e_xsk *xsk,
- struct mlx5e_rss_params *rss_params,
- struct mlx5e_params *params,
- u16 mtu)
+void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
+ struct mlx5e_rss_params *rss_params = &priv->rss_params;
+ struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
u8 rx_cq_period_mode;
+ priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
+
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2,
@@ -4874,6 +5009,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
/* AF_XDP */
params->xsk = xsk;
+
+ /* Do not update netdev->features directly in here;
+ * mlx5e_update_features() is called from mlx5e_attach_netdev().
+ * To change netdev->features, modify mlx5e_fix_features() instead.
+ */
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
@@ -4975,8 +5115,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- mlx5e_vxlan_set_netdev_info(priv);
-
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5026,18 +5164,12 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_RXFCS;
netdev->features = netdev->hw_features;
- if (!priv->channels.params.lro_en)
- netdev->features &= ~NETIF_F_LRO;
+ /* Defaults */
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
-
- if (!priv->channels.params.scatter_fcs_en)
- netdev->features &= ~NETIF_F_RXFCS;
-
- /* prefere CQE compression over rxhash */
- if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
- netdev->features &= ~NETIF_F_RXHASH;
+ netdev->features &= ~NETIF_F_LRO;
+ netdev->features &= ~NETIF_F_RXFCS;
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
@@ -5051,6 +5183,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_NTUPLE;
#endif
}
+ if (mlx5_qos_is_supported(mdev))
+ netdev->features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
@@ -5101,33 +5235,28 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
}
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+ struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rss_params *rss = &priv->rss_params;
int err;
- err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
- if (err)
- return err;
-
- mlx5e_build_nic_params(priv, &priv->xsk, rss, &priv->channels.params,
- netdev->mtu);
+ mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
+ mlx5e_vxlan_set_netdev_info(priv);
mlx5e_timestamp_init(priv);
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
+
err = mlx5e_tls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
- mlx5e_build_nic_netdev(netdev);
+
err = mlx5e_devlink_port_register(priv);
if (err)
mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+
mlx5e_health_create_reporters(priv);
return 0;
@@ -5139,7 +5268,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_devlink_port_unregister(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
- mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
@@ -5268,6 +5396,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
+ mlx5e_enable_blocking_events(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
@@ -5305,6 +5434,12 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
+ mlx5e_disable_blocking_events(priv);
+ if (priv->en_trap) {
+ mlx5e_deactivate_trap(priv);
+ mlx5e_close_trap(priv->en_trap);
+ priv->en_trap = NULL;
+ }
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
@@ -5335,27 +5470,23 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
};
/* mlx5e generic netdev management API (move to en_common.c) */
-
-/* mlx5e_netdev_init/cleanup must be called from profile->init/cleanup callbacks */
-int mlx5e_netdev_init(struct net_device *netdev,
- struct mlx5e_priv *priv,
- struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+int mlx5e_priv_init(struct mlx5e_priv *priv,
+ struct net_device *netdev,
+ struct mlx5_core_dev *mdev)
{
+ memset(priv, 0, sizeof(*priv));
+
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
- priv->profile = profile;
- priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
- priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
priv->max_opened_tc = 1;
if (!alloc_cpumask_var(&priv->scratchpad.cpumask, GFP_KERNEL))
return -ENOMEM;
mutex_init(&priv->state_lock);
+ hash_init(priv->htb.qos_tc2node);
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
@@ -5365,9 +5496,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
if (!priv->wq)
goto err_free_cpumask;
- /* netdev init */
- netif_carrier_off(netdev);
-
return 0;
err_free_cpumask:
@@ -5376,38 +5504,39 @@ err_free_cpumask:
return -ENOMEM;
}
-void mlx5e_netdev_cleanup(struct net_device *netdev, struct mlx5e_priv *priv)
+void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
{
+ int i;
+
destroy_workqueue(priv->wq);
free_cpumask_var(priv->scratchpad.cpumask);
+
+ for (i = 0; i < priv->htb.max_qos_sqs; i++)
+ kfree(priv->htb.qos_sq_stats[i]);
+ kvfree(priv->htb.qos_sq_stats);
}
-struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
- const struct mlx5e_profile *profile,
- int nch,
- void *ppriv)
+struct net_device *
+mlx5e_create_netdev(struct mlx5_core_dev *mdev, unsigned int txqs, unsigned int rxqs)
{
struct net_device *netdev;
- unsigned int ptp_txqs = 0;
int err;
- if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
- ptp_txqs = profile->max_tc;
-
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * profile->max_tc + ptp_txqs,
- nch * profile->rq_groups);
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
}
- err = profile->init(mdev, netdev, profile, ppriv);
+ err = mlx5e_priv_init(netdev_priv(netdev), netdev, mdev);
if (err) {
- mlx5_core_err(mdev, "failed to init mlx5e profile %d\n", err);
+ mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
goto err_free_netdev;
}
+ netif_carrier_off(netdev);
+ dev_net_set(netdev, mlx5_core_net(mdev));
+
return netdev;
err_free_netdev:
@@ -5416,14 +5545,23 @@ err_free_netdev:
return NULL;
}
+static void mlx5e_update_features(struct net_device *netdev)
+{
+ if (netdev->reg_state != NETREG_REGISTERED)
+ return; /* features will be updated on netdev registration */
+
+ rtnl_lock();
+ netdev_update_features(netdev);
+ rtnl_unlock();
+}
+
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
- const struct mlx5e_profile *profile;
+ const struct mlx5e_profile *profile = priv->profile;
int max_nch;
int err;
- profile = priv->profile;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
@@ -5463,6 +5601,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
if (profile->enable)
profile->enable(priv);
+ mlx5e_update_features(priv->netdev);
+
return 0;
err_cleanup_tx:
@@ -5489,13 +5629,76 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
cancel_work_sync(&priv->update_stats_work);
}
+static int
+mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5e_priv_init(priv, netdev, mdev);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_priv_init failed, err=%d\n", err);
+ return err;
+ }
+ netif_carrier_off(netdev);
+ priv->profile = new_profile;
+ priv->ppriv = new_ppriv;
+ err = new_profile->init(priv->mdev, priv->netdev);
+ if (err)
+ return err;
+ err = mlx5e_attach_netdev(priv);
+ if (err)
+ new_profile->cleanup(priv);
+ return err;
+}
+
+int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
+ const struct mlx5e_profile *new_profile, void *new_ppriv)
+{
+ unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
+ const struct mlx5e_profile *orig_profile = priv->profile;
+ void *orig_ppriv = priv->ppriv;
+ int err, rollback_err;
+
+ /* sanity */
+ if (new_max_nch != priv->max_nch) {
+ netdev_warn(priv->netdev,
+ "%s: Replacing profile with different max channels\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ /* cleanup old profile */
+ mlx5e_detach_netdev(priv);
+ priv->profile->cleanup(priv);
+ mlx5e_priv_cleanup(priv);
+
+ err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv);
+ if (err) { /* roll back to original profile */
+ netdev_warn(priv->netdev, "%s: new profile init failed, %d\n",
+ __func__, err);
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv);
+ if (rollback_err) {
+ netdev_err(priv->netdev,
+ "%s: failed to rollback to orig profile, %d\n",
+ __func__, rollback_err);
+ }
+ return err;
+}
+
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
- const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
- if (profile->cleanup)
- profile->cleanup(priv);
+ mlx5e_priv_cleanup(priv);
free_netdev(netdev);
}
@@ -5541,28 +5744,48 @@ static int mlx5e_probe(struct auxiliary_device *adev,
const struct auxiliary_device_id *id)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
+ const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
struct net_device *netdev;
pm_message_t state = {};
- void *priv;
+ unsigned int txqs, rxqs, ptp_txqs = 0;
+ struct mlx5e_priv *priv;
+ int qos_sqs = 0;
int err;
int nch;
+ if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
+ ptp_txqs = profile->max_tc;
+
+ if (mlx5_qos_is_supported(mdev))
+ qos_sqs = mlx5e_qos_max_leaf_nodes(mdev);
+
nch = mlx5e_get_max_num_channels(mdev);
- netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
+ txqs = nch * profile->max_tc + ptp_txqs + qos_sqs;
+ rxqs = nch * profile->rq_groups;
+ netdev = mlx5e_create_netdev(mdev, txqs, rxqs);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
return -ENOMEM;
}
- dev_net_set(netdev, mlx5_core_net(mdev));
+ mlx5e_build_nic_netdev(netdev);
+
priv = netdev_priv(netdev);
dev_set_drvdata(&adev->dev, priv);
+ priv->profile = profile;
+ priv->ppriv = NULL;
+ err = profile->init(mdev, netdev);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
+ goto err_destroy_netdev;
+ }
+
err = mlx5e_resume(adev);
if (err) {
mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
- goto err_destroy_netdev;
+ goto err_profile_cleanup;
}
err = register_netdev(netdev);
@@ -5578,6 +5801,8 @@ static int mlx5e_probe(struct auxiliary_device *adev,
err_resume:
mlx5e_suspend(adev, state);
+err_profile_cleanup:
+ profile->cleanup(priv);
err_destroy_netdev:
mlx5e_destroy_netdev(priv);
return err;
@@ -5591,6 +5816,7 @@ static void mlx5e_remove(struct auxiliary_device *adev)
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
+ priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
}
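
The profile replacement added above (mlx5e_netdev_change_profile) follows a detach, attach-new, roll-back-on-failure sequence. Below is a minimal, self-contained sketch of that control flow; every name in it is a hypothetical stand-in, not an mlx5 API.

#include <stdio.h>

struct profile { const char *name; int (*init)(void); };

static const struct profile *cur_profile;

static int attach(const struct profile *p) { return p->init(); }
static void detach(void) { /* tear down whatever cur_profile set up */ }

static int change_profile(const struct profile *new_prof)
{
	const struct profile *orig = cur_profile;	/* kept for rollback */
	int err;

	detach();			/* the old profile goes away first */

	err = attach(new_prof);
	if (!err) {
		cur_profile = new_prof;
		return 0;
	}

	/* New profile failed to come up: best-effort rollback. */
	if (attach(orig))
		fprintf(stderr, "rollback to original profile failed\n");
	else
		cur_profile = orig;
	return err;
}
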
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index f0ceae65f6cf..a132fff7a980 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
@@ -653,8 +652,6 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
.ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = mlx5e_features_check,
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_rate = mlx5e_set_vf_rate,
@@ -686,7 +683,10 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ priv->max_nch = mlx5e_calc_max_nch(priv, priv->profile);
params = &priv->channels.params;
+
+ params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->sw_mtu = netdev->mtu;
@@ -712,20 +712,16 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}
-static void mlx5e_build_rep_netdev(struct net_device *netdev)
+static void mlx5e_build_rep_netdev(struct net_device *netdev,
+ struct mlx5_core_dev *mdev,
+ struct mlx5_eswitch_rep *rep)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5_core_dev *mdev = priv->mdev;
-
SET_NETDEV_DEV(netdev, mdev->device);
if (rep->vport == MLX5_VPORT_UPLINK) {
netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
/* we want a persistent mac for the uplink rep */
mlx5_query_mac_address(mdev, netdev->dev_addr);
netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
- mlx5e_vxlan_set_netdev_info(priv);
mlx5e_dcbnl_build_rep_netdev(netdev);
} else {
netdev->netdev_ops = &mlx5e_netdev_ops_rep;
@@ -757,30 +753,27 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
}
static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+ struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int err;
-
- err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
- if (err)
- return err;
-
- priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
mlx5e_build_rep_params(netdev);
- mlx5e_build_rep_netdev(netdev);
-
mlx5e_timestamp_init(priv);
return 0;
}
+static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_vxlan_set_netdev_info(priv);
+ return mlx5e_init_rep(mdev, netdev);
+}
+
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
- mlx5e_netdev_cleanup(priv->netdev, priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
@@ -1057,7 +1050,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
mlx5e_set_netdev_mtu_boundaries(priv);
+ mlx5e_rep_neigh_init(rpriv);
+}
+
+static void mlx5e_rep_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ mlx5e_rep_neigh_cleanup(rpriv);
}
static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
@@ -1092,6 +1095,7 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
@@ -1110,12 +1114,15 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5_notifier_register(mdev, &priv->events_nb);
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
+ mlx5e_rep_neigh_init(rpriv);
}
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev;
+ mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);
mlx5e_rep_tc_disable(priv);
@@ -1167,6 +1174,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_rep_tx,
.enable = mlx5e_rep_enable,
+ .disable = mlx5e_rep_disable,
.update_rx = mlx5e_update_rep_rx,
.update_stats = mlx5e_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
@@ -1177,7 +1185,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
- .init = mlx5e_init_rep,
+ .init = mlx5e_init_ul_rep,
.cleanup = mlx5e_cleanup_rep,
.init_rx = mlx5e_init_ul_rep_rx,
.cleanup_rx = mlx5e_cleanup_ul_rep_rx,
@@ -1203,6 +1211,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
struct mlx5e_rep_priv *rpriv;
struct devlink_port *dl_port;
struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ unsigned int txqs, rxqs;
int nch, err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
@@ -1212,10 +1222,13 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
/* rpriv->rep to be looked up when profile->init() is called */
rpriv->rep = rep;
- nch = mlx5e_get_max_num_channels(dev);
profile = (rep->vport == MLX5_VPORT_UPLINK) ?
&mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
- netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
+
+ nch = mlx5e_get_max_num_channels(dev);
+ txqs = nch * profile->max_tc;
+ rxqs = nch * profile->rq_groups;
+ netdev = mlx5e_create_netdev(dev, txqs, rxqs);
if (!netdev) {
mlx5_core_warn(dev,
"Failed to create representor netdev for vport %d\n",
@@ -1224,7 +1237,8 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
return -EINVAL;
}
- dev_net_set(netdev, mlx5_core_net(dev));
+ mlx5e_build_rep_netdev(netdev, dev, rep);
+
rpriv->netdev = netdev;
rep->rep_data[REP_ETH].priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
@@ -1235,20 +1249,21 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_destroy_netdev;
}
- err = mlx5e_attach_netdev(netdev_priv(netdev));
+ priv = netdev_priv(netdev);
+ priv->profile = profile;
+ priv->ppriv = rpriv;
+ err = profile->init(dev, netdev);
if (err) {
- netdev_warn(netdev,
- "Failed to attach representor netdev for vport %d\n",
- rep->vport);
+ netdev_warn(netdev, "rep profile init failed, %d\n", err);
goto err_destroy_mdev_resources;
}
- err = mlx5e_rep_neigh_init(rpriv);
+ err = mlx5e_attach_netdev(netdev_priv(netdev));
if (err) {
netdev_warn(netdev,
- "Failed to initialized neighbours handling for vport %d\n",
+ "Failed to attach representor netdev for vport %d\n",
rep->vport);
- goto err_detach_netdev;
+ goto err_cleanup_profile;
}
err = register_netdev(netdev);
@@ -1256,7 +1271,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
netdev_warn(netdev,
"Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_neigh_cleanup;
+ goto err_detach_netdev;
}
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
@@ -1264,12 +1279,12 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
devlink_port_type_eth_set(dl_port, netdev);
return 0;
-err_neigh_cleanup:
- mlx5e_rep_neigh_cleanup(rpriv);
-
err_detach_netdev:
mlx5e_detach_netdev(netdev_priv(netdev));
+err_cleanup_profile:
+ priv->profile->cleanup(priv);
+
err_destroy_mdev_resources:
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(dev);
@@ -1294,8 +1309,8 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
if (dl_port)
devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
- mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
+ priv->profile->cleanup(priv);
if (rep->vport == MLX5_VPORT_UPLINK)
mlx5e_destroy_mdev_resources(priv->mdev);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 988195ab1c54..d1696404cca9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -59,6 +59,8 @@ struct mlx5e_neigh_update_table {
struct mlx5_tc_ct_priv;
struct mlx5e_rep_bond;
+struct mlx5e_tc_tun_encap;
+
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
@@ -90,6 +92,9 @@ struct mlx5_rep_uplink_priv {
/* support eswitch vports bonding */
struct mlx5e_rep_bond *bond;
+
+ /* tc tunneling encapsulation private data */
+ struct mlx5e_tc_tun_encap *encap;
};
struct mlx5e_rep_priv {
@@ -110,7 +115,6 @@ struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
}
struct mlx5e_neigh {
- struct net_device *dev;
union {
__be32 v4;
struct in6_addr v6;
@@ -122,6 +126,7 @@ struct mlx5e_neigh_hash_entry {
struct rhash_head rhash_node;
struct mlx5e_neigh m_neigh;
struct mlx5e_priv *priv;
+ struct net_device *neigh_dev;
/* Save the neigh hash entry in a list on the representor in
* addition to the hash table. In order to iterate easily over the
@@ -153,6 +158,7 @@ enum {
/* set when the encap entry is successfully offloaded into HW */
MLX5_ENCAP_ENTRY_VALID = BIT(0),
MLX5_REFORMAT_DECAP = BIT(1),
+ MLX5_ENCAP_ENTRY_NO_ROUTE = BIT(2),
};
struct mlx5e_decap_key {
@@ -175,12 +181,12 @@ struct mlx5e_encap_entry {
struct mlx5e_neigh_hash_entry *nhe;
/* neigh hash entry list of encaps sharing the same neigh */
struct list_head encap_list;
- struct mlx5e_neigh m_neigh;
/* a node of the eswitch encap hash table which keeps all the encap
* entries
*/
struct hlist_node encap_hlist;
struct list_head flows;
+ struct list_head route_list;
struct mlx5_pkt_reformat *pkt_reformat;
const struct ip_tunnel_info *tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ca4b55839a8a..1b6ad94ebb10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -47,11 +47,11 @@
#include "fpga/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/tls_rxtx.h"
-#include "lib/clock.h"
#include "en/xdp.h"
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
+#include "devlink.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
@@ -212,11 +212,6 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
}
-static inline bool mlx5e_page_is_reserved(struct page *page)
-{
- return page_is_pfmemalloc(page) || page_to_nid(page) != numa_mem_id();
-}
-
static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
@@ -229,7 +224,7 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
- if (unlikely(mlx5e_page_is_reserved(dma_info->page))) {
+ if (!dev_page_is_reusable(dma_info->page)) {
stats->cache_waive++;
return false;
}
@@ -1066,9 +1061,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
}
if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
- skb_hwtstamps(skb)->hwtstamp =
- mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
-
+ skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
+ rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -1126,12 +1120,8 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
- xdp->data_hard_start = va;
- xdp->data = va + headroom;
- xdp_set_data_meta_invalid(xdp);
- xdp->data_end = xdp->data + len;
- xdp->rxq = &rq->xdp_rxq;
- xdp->frame_sz = rq->buff.frame0_sz;
+ xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, va, headroom, len, false);
}
static struct sk_buff *
@@ -1675,9 +1665,8 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
}
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- skb_hwtstamps(skb)->hwtstamp =
- mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
-
+ skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
+ rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -1794,12 +1783,10 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
-#ifdef CONFIG_MLX5_EN_IPSEC
- if (MLX5_IPSEC_DEV(mdev)) {
- netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
+ if (mlx5_fpga_is_ipsec_device(mdev)) {
+ netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
return -EINVAL;
}
-#endif
if (!rq->handle_rx_cqe) {
netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
return -EINVAL;
@@ -1829,3 +1816,48 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
return 0;
}
+
+static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_priv *priv = netdev_priv(rq->netdev);
+ struct mlx5_wq_cyc *wq = &rq->wqe.wq;
+ struct mlx5e_wqe_frag_info *wi;
+ struct sk_buff *skb;
+ u32 cqe_bcnt;
+ u16 trap_id;
+ u16 ci;
+
+ trap_id = get_cqe_flow_tag(cqe);
+ ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
+ wi = get_frag(rq, ci);
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ rq->stats->wqe_err++;
+ goto free_wqe;
+ }
+
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
+ if (!skb)
+ goto free_wqe;
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ skb_push(skb, ETH_HLEN);
+
+ mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port);
+ dev_kfree_skb_any(skb);
+
+free_wqe:
+ mlx5e_free_rx_wqe(rq, wi, false);
+ mlx5_wq_cyc_pop(wq);
+}
+
+void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
+{
+ rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
+ mlx5e_skb_from_cqe_linear :
+ mlx5e_skb_from_cqe_nonlinear;
+ rq->post_wqes = mlx5e_post_rx_wqes;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+ rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
+}
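
The open-coded xdp_buff setup removed in the mlx5e_fill_xdp_buff() hunk above is replaced by the generic xdp_init_buff()/xdp_prepare_buff() helpers. A hedged sketch of the equivalence, reconstructed only from the removed assignments (kernel context assumed, function name hypothetical):

/* What the two helper calls amount to for this call site; the 'false'
 * argument to xdp_prepare_buff() marks the metadata area as invalid,
 * matching the old xdp_set_data_meta_invalid() call.
 */
static void mlx5e_fill_xdp_buff_sketch(struct mlx5e_rq *rq, void *va,
					u16 headroom, u32 len,
					struct xdp_buff *xdp)
{
	/* xdp_init_buff(): per-queue invariants */
	xdp->frame_sz = rq->buff.frame0_sz;
	xdp->rxq = &rq->xdp_rxq;

	/* xdp_prepare_buff(): per-packet pointers */
	xdp->data_hard_start = va;
	xdp->data = va + headroom;
	xdp->data_end = xdp->data + len;
	xdp_set_data_meta_invalid(xdp);
}
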
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 2cf2042b37c7..92c5b81427b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -420,6 +420,25 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
}
}
+static void mlx5e_stats_grp_sw_update_stats_qos(struct mlx5e_priv *priv,
+ struct mlx5e_sw_stats *s)
+{
+ struct mlx5e_sq_stats **stats;
+ u16 max_qos_sqs;
+ int i;
+
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ stats = READ_ONCE(priv->htb.qos_sq_stats);
+
+ for (i = 0; i < max_qos_sqs; i++) {
+ mlx5e_stats_grp_sw_update_stats_sq(s, READ_ONCE(stats[i]));
+
+ /* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
+ barrier();
+ }
+}
+
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
{
struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -449,6 +468,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
}
}
mlx5e_stats_grp_sw_update_stats_ptp(priv, s);
+ mlx5e_stats_grp_sw_update_stats_qos(priv, s);
}
static const struct counter_desc q_stats_desc[] = {
@@ -1740,6 +1760,41 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
+static const struct counter_desc qos_sq_stats_desc[] = {
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
+#ifdef CONFIG_MLX5_EN_TLS
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
+#endif
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, recover) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqes) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, cqe_err) },
+};
+
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
#define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc)
@@ -1750,6 +1805,49 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
+#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
+
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
+{
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ return NUM_QOS_SQ_STATS * smp_load_acquire(&priv->htb.max_qos_sqs);
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
+{
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ u16 max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ int i, qid;
+
+ for (qid = 0; qid < max_qos_sqs; qid++)
+ for (i = 0; i < NUM_QOS_SQ_STATS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ qos_sq_stats_desc[i].format, qid);
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
+{
+ struct mlx5e_sq_stats **stats;
+ u16 max_qos_sqs;
+ int i, qid;
+
+ /* Pairs with smp_store_release in mlx5e_open_qos_sq. */
+ max_qos_sqs = smp_load_acquire(&priv->htb.max_qos_sqs);
+ stats = READ_ONCE(priv->htb.qos_sq_stats);
+
+ for (qid = 0; qid < max_qos_sqs; qid++) {
+ struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
+
+ for (i = 0; i < NUM_QOS_SQ_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
+ }
+
+ return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
@@ -1932,6 +2030,7 @@ MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
+static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
@@ -1955,6 +2054,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
&MLX5E_STATS_GRP(channels),
&MLX5E_STATS_GRP(per_port_buff_congest),
&MLX5E_STATS_GRP(ptp),
+ &MLX5E_STATS_GRP(qos),
};
unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv)
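
The qos stats group reads priv->htb.max_qos_sqs with smp_load_acquire(), pairing with an smp_store_release() on the SQ-open path so that a reader which observes the new count also observes the stats pointers published before it. A minimal userspace sketch of the same publish/consume pattern, using C11 atomics in place of the kernel primitives (single writer assumed, names hypothetical):

#include <stdatomic.h>
#include <stdlib.h>

struct stats { unsigned long packets; };

static struct stats *slots[64];
static _Atomic unsigned int nr_slots;	/* published slot count */

/* Writer: fill the slot first, then publish the new count with release. */
static void publish_slot(unsigned int i)
{
	slots[i] = calloc(1, sizeof(*slots[i]));
	if (!slots[i])
		return;
	atomic_store_explicit(&nr_slots, i + 1, memory_order_release);
}

/* Reader: acquire-load the count; slots[0..n-1] are then safe to read. */
static unsigned long sum_packets(void)
{
	unsigned int n = atomic_load_explicit(&nr_slots, memory_order_acquire);
	unsigned long total = 0;

	for (unsigned int i = 0; i < n; i++)
		total += slots[i]->packets;
	return total;
}
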
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index e41fc11f2ce7..93c41312fb03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -55,6 +55,8 @@
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
+
struct counter_desc {
char format[ETH_GSTRING_LEN];
size_t offset; /* Byte offset */
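
MLX5E_DECLARE_QOS_TX_STAT follows the existing counter_desc convention shown just above: a printf-style name format (the %d is filled with the queue id) plus a byte offset into the stats struct. A self-contained sketch of how such descriptors are typically consumed, with hypothetical names:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sq_stats { uint64_t packets; uint64_t bytes; };

struct counter_desc { const char *format; size_t offset; };

static const struct counter_desc qos_desc[] = {
	{ "qos_tx%d_packets", offsetof(struct sq_stats, packets) },
	{ "qos_tx%d_bytes",   offsetof(struct sq_stats, bytes)   },
};

/* Read the 64-bit counter located 'offset' bytes into the stats struct. */
static uint64_t read_ctr(const struct sq_stats *s, const struct counter_desc *d)
{
	return *(const uint64_t *)((const char *)s + d->offset);
}

static void dump_queue(const struct sq_stats *s, int qid)
{
	char name[32];

	for (size_t i = 0; i < sizeof(qos_desc) / sizeof(qos_desc[0]); i++) {
		snprintf(name, sizeof(name), qos_desc[i].format, qid);
		printf("%s: %llu\n", name,
		       (unsigned long long)read_ctr(s, &qos_desc[i]));
	}
}
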
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index dd0bfbacad47..0da69b98f38f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -63,6 +63,8 @@
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
+#include "en/tc_priv.h"
+#include "en/tc_tun_encap.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
@@ -71,90 +73,6 @@
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
-#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
-
-enum {
- MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
- MLX5E_TC_FLOW_FLAG_EGRESS = MLX5E_TC_FLAG_EGRESS_BIT,
- MLX5E_TC_FLOW_FLAG_ESWITCH = MLX5E_TC_FLAG_ESW_OFFLOAD_BIT,
- MLX5E_TC_FLOW_FLAG_FT = MLX5E_TC_FLAG_FT_OFFLOAD_BIT,
- MLX5E_TC_FLOW_FLAG_NIC = MLX5E_TC_FLAG_NIC_OFFLOAD_BIT,
- MLX5E_TC_FLOW_FLAG_OFFLOADED = MLX5E_TC_FLOW_BASE,
- MLX5E_TC_FLOW_FLAG_HAIRPIN = MLX5E_TC_FLOW_BASE + 1,
- MLX5E_TC_FLOW_FLAG_HAIRPIN_RSS = MLX5E_TC_FLOW_BASE + 2,
- MLX5E_TC_FLOW_FLAG_SLOW = MLX5E_TC_FLOW_BASE + 3,
- MLX5E_TC_FLOW_FLAG_DUP = MLX5E_TC_FLOW_BASE + 4,
- MLX5E_TC_FLOW_FLAG_NOT_READY = MLX5E_TC_FLOW_BASE + 5,
- MLX5E_TC_FLOW_FLAG_DELETED = MLX5E_TC_FLOW_BASE + 6,
- MLX5E_TC_FLOW_FLAG_CT = MLX5E_TC_FLOW_BASE + 7,
- MLX5E_TC_FLOW_FLAG_L3_TO_L2_DECAP = MLX5E_TC_FLOW_BASE + 8,
-};
-
-#define MLX5E_TC_MAX_SPLITS 1
-
-/* Helper struct for accessing a struct containing list_head array.
- * Containing struct
- * |- Helper array
- * [0] Helper item 0
- * |- list_head item 0
- * |- index (0)
- * [1] Helper item 1
- * |- list_head item 1
- * |- index (1)
- * To access the containing struct from one of the list_head items:
- * 1. Get the helper item from the list_head item using
- * helper item =
- * container_of(list_head item, helper struct type, list_head field)
- * 2. Get the contining struct from the helper item and its index in the array:
- * containing struct =
- * container_of(helper item, containing struct type, helper field[index])
- */
-struct encap_flow_item {
- struct mlx5e_encap_entry *e; /* attached encap instance */
- struct list_head list;
- int index;
-};
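
The helper-array comment and struct removed above describe a two-step container_of() access (presumably relocated, since the hunk above adds en/tc_priv.h to the includes). A self-contained illustration of that access pattern, with hypothetical names; the variable array index inside offsetof() relies on the GCC/clang extension that the kernel's container_of() usage also depends on:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct helper_item {
	struct list_head list;
	int index;		/* position of this item in the array below */
};

struct containing {
	struct helper_item items[4];
};

static struct containing *containing_from_list(struct list_head *lh)
{
	/* 1. list_head -> helper item */
	struct helper_item *it = container_of(lh, struct helper_item, list);

	/* 2. helper item + its index -> containing struct */
	return container_of(it, struct containing, items[it->index]);
}
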
-
-struct mlx5e_tc_flow {
- struct rhash_head node;
- struct mlx5e_priv *priv;
- u64 cookie;
- unsigned long flags;
- struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
-
- /* flows sharing the same reformat object - currently mpls decap */
- struct list_head l3_to_l2_reformat;
- struct mlx5e_decap_entry *decap_reformat;
-
- /* Flow can be associated with multiple encap IDs.
- * The number of encaps is bounded by the number of supported
- * destinations.
- */
- struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5e_tc_flow *peer_flow;
- struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
- struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
- struct list_head hairpin; /* flows sharing the same hairpin */
- struct list_head peer; /* flows with peer flow */
- struct list_head unready; /* flows not ready to be offloaded (e.g due to missing route) */
- struct net_device *orig_dev; /* netdev adding flow first */
- int tmp_efi_index;
- struct list_head tmp_list; /* temporary flow list used by neigh update */
- refcount_t refcnt;
- struct rcu_head rcu_head;
- struct completion init_done;
- int tunnel_id; /* the mapped tunnel id of this flow */
- struct mlx5_flow_attr *attr;
-};
-
-struct mlx5e_tc_flow_parse_attr {
- const struct ip_tunnel_info *tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
- struct net_device *filter_dev;
- struct mlx5_flow_spec spec;
- struct mlx5e_tc_mod_hdr_acts mod_hdr_acts;
- int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
- struct ethhdr eth;
-};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
@@ -165,10 +83,15 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
.moffset = 0,
.mlen = 2,
},
+ [VPORT_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
+ .moffset = 2,
+ .mlen = 2,
+ },
[TUNNEL_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
.moffset = 1,
- .mlen = 3,
+ .mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8),
.soffset = MLX5_BYTE_OFF(fte_match_param,
misc_parameters_2.metadata_reg_c_1),
},
@@ -190,6 +113,14 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
+/* To avoid a false lockdep dependency warning, give the tc_ht lock a
+ * class different from that of the flow-group ht it can nest with: when
+ * the last flow of a group is deleted and the group is then deleted, we
+ * get into del_sw_flow_group(), which calls rhashtable_destroy() on
+ * fg->ftes_hash; that takes its ht->mutex, which is a different mutex
+ * than the tc_ht one here.
+ */
+static struct lock_class_key tc_ht_lock_key;
+
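/* Editorial sketch (hedged): a per-site lock_class_key like the one above is
 * typically applied right after the hashtable is initialized, e.g.
 *
 *	err = rhashtable_init(tc_ht, &tc_ht_params);
 *	if (err)
 *		return err;
 *	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
 *
 * The actual call site is not part of this hunk.
 */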
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
@@ -239,11 +170,11 @@ mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
}
int
-mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
- enum mlx5_flow_namespace_type ns,
- enum mlx5e_tc_attr_to_reg type,
- u32 data)
+mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data)
{
int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
@@ -267,9 +198,10 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
MLX5_SET(set_action_in, modact, offset, moffset * 8);
MLX5_SET(set_action_in, modact, length, mlen * 8);
MLX5_SET(set_action_in, modact, data, data);
+ err = mod_hdr_acts->num_actions;
mod_hdr_acts->num_actions++;
- return 0;
+ return err;
}
static struct mlx5_tc_ct_priv *
@@ -318,6 +250,41 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}
+int
+mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data)
+{
+ int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);
+
+ return ret < 0 ? ret : 0;
+}
+
+void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ int act_id, u32 data)
+{
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
+ int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
+ int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
+ char *modact;
+
+ modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
+
+ /* The firmware length field is 5 bits wide; 0 means 32 bits */
+ if (mlen == 4)
+ mlen = 0;
+
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field, mfield);
+ MLX5_SET(set_action_in, modact, offset, moffset * 8);
+ MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, data, data);
+}
+
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
@@ -355,15 +322,14 @@ struct mlx5e_hairpin_entry {
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow);
-static struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
+struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
if (!flow || !refcount_inc_not_zero(&flow->refcnt))
return ERR_PTR(-EINVAL);
return flow;
}
-static void mlx5e_flow_put(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
if (refcount_dec_and_test(&flow->refcnt)) {
mlx5e_tc_del_flow(priv, flow);
@@ -371,48 +337,6 @@ static void mlx5e_flow_put(struct mlx5e_priv *priv,
}
}
-static void __flow_flag_set(struct mlx5e_tc_flow *flow, unsigned long flag)
-{
- /* Complete all memory stores before setting bit. */
- smp_mb__before_atomic();
- set_bit(flag, &flow->flags);
-}
-
-#define flow_flag_set(flow, flag) __flow_flag_set(flow, MLX5E_TC_FLOW_FLAG_##flag)
-
-static bool __flow_flag_test_and_set(struct mlx5e_tc_flow *flow,
- unsigned long flag)
-{
- /* test_and_set_bit() provides all necessary barriers */
- return test_and_set_bit(flag, &flow->flags);
-}
-
-#define flow_flag_test_and_set(flow, flag) \
- __flow_flag_test_and_set(flow, \
- MLX5E_TC_FLOW_FLAG_##flag)
-
-static void __flow_flag_clear(struct mlx5e_tc_flow *flow, unsigned long flag)
-{
- /* Complete all memory stores before clearing bit. */
- smp_mb__before_atomic();
- clear_bit(flag, &flow->flags);
-}
-
-#define flow_flag_clear(flow, flag) __flow_flag_clear(flow, \
- MLX5E_TC_FLOW_FLAG_##flag)
-
-static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
-{
- bool ret = test_bit(flag, &flow->flags);
-
- /* Read fields of flow structure only after checking flags. */
- smp_mb__after_atomic();
- return ret;
-}
-
-#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
- MLX5E_TC_FLOW_FLAG_##flag)
-
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, ESWITCH);
@@ -423,7 +347,7 @@ static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
return flow_flag_test(flow, FT);
}
-static bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, OFFLOADED);
}
@@ -1138,23 +1062,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
kfree(flow->attr);
}
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index);
-
-static int mlx5e_attach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct net_device *mirred_dev,
- int out_index,
- struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid);
-static int mlx5e_attach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack);
-static void mlx5e_detach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow);
-
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -1189,10 +1097,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
return rule;
}
-static void
-mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr)
+void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
@@ -1211,7 +1118,7 @@ offload_rule_0:
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
@@ -1237,9 +1144,8 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
return rule;
}
-static void
-mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow)
+void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow)
{
struct mlx5_flow_attr *slow_attr;
@@ -1307,6 +1213,63 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
mutex_unlock(&uplink_priv->unready_flows_lock);
}
+static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);
+
+bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
+{
+ struct mlx5_core_dev *out_mdev, *route_mdev;
+ struct mlx5e_priv *out_priv, *route_priv;
+
+ out_priv = netdev_priv(out_dev);
+ out_mdev = out_priv->mdev;
+ route_priv = netdev_priv(route_dev);
+ route_mdev = route_priv->mdev;
+
+ if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
+ route_mdev->coredev_type != MLX5_COREDEV_VF)
+ return false;
+
+ return same_hw_devs(out_priv, route_priv);
+}
+
+int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
+{
+ struct mlx5e_priv *out_priv, *route_priv;
+ struct mlx5_core_dev *route_mdev;
+ struct mlx5_eswitch *esw;
+ u16 vhca_id;
+ int err;
+
+ out_priv = netdev_priv(out_dev);
+ esw = out_priv->mdev->priv.eswitch;
+ route_priv = netdev_priv(route_dev);
+ route_mdev = route_priv->mdev;
+
+ vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
+ err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+ return err;
+}
+
+int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
+ struct mlx5_modify_hdr *mod_hdr;
+
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev,
+ get_flow_name_space(flow),
+ mod_hdr_acts->num_actions,
+ mod_hdr_acts->actions);
+ if (IS_ERR(mod_hdr))
+ return PTR_ERR(mod_hdr);
+
+ WARN_ON(flow->attr->modify_hdr);
+ flow->attr->modify_hdr = mod_hdr;
+
+ return 0;
+}
+
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
@@ -1316,21 +1279,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
+ bool vf_tun = false, encap_valid = true;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
- bool encap_valid = true;
u32 max_prio, max_chain;
int err = 0;
int out_index;
- if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
- NL_SET_ERR_MSG_MOD(extack,
- "E-switch priorities unsupported, upgrade FW");
- return -EOPNOTSUPP;
- }
-
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
@@ -1340,20 +1297,28 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
}
max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
+ if (flow_flag_test(flow, TUN_RX)) {
+ err = mlx5e_attach_decap_route(priv, flow);
+ if (err)
+ goto err_out;
}
if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
err = mlx5e_attach_decap(priv, flow, extack);
if (err)
- return err;
+ goto err_out;
}
parse_attr = attr->parse_attr;
@@ -1371,8 +1336,11 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
if (err)
- return err;
+ goto err_out;
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ vf_tun = true;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
esw_attr->dests[out_index].rep = rpriv->rep;
@@ -1381,20 +1349,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
- return err;
+ goto err_out;
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
- err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- if (err)
- return err;
+ if (vf_tun) {
+ err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
+ if (err)
+ goto err_out;
+ } else {
+ err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
+ if (err)
+ goto err_out;
+ }
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(esw_attr->counter_dev, true);
- if (IS_ERR(counter))
- return PTR_ERR(counter);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_out;
+ }
attr->counter = counter;
}
@@ -1408,12 +1383,17 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
else
flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
- if (IS_ERR(flow->rule[0]))
- return PTR_ERR(flow->rule[0]);
- else
- flow_flag_set(flow, OFFLOADED);
+ if (IS_ERR(flow->rule[0])) {
+ err = PTR_ERR(flow->rule[0]);
+ goto err_out;
+ }
+ flow_flag_set(flow, OFFLOADED);
return 0;
+
+err_out:
+ flow_flag_set(flow, FAILED);
+ return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
@@ -1434,8 +1414,11 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
+ bool vf_tun = false;
int out_index;
+ esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
if (flow_flag_test(flow, NOT_READY))
@@ -1453,20 +1436,33 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
- for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
+ if (flow->decap_route)
+ mlx5e_detach_decap_route(priv, flow);
+
+ for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ if (esw_attr->dests[out_index].flags &
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ vf_tun = true;
+ if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
- kvfree(attr->parse_attr);
+ }
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- mlx5e_detach_mod_hdr(priv, flow);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
+ if (vf_tun && attr->modify_hdr)
+ mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+ else
+ mlx5e_detach_mod_hdr(priv, flow);
+ }
+ kvfree(attr->parse_attr);
+ kvfree(attr->esw_attr->rx_tun_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);
+ mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
@@ -1474,141 +1470,13 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
kfree(flow->attr);
}
-void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- struct list_head *flow_list)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_attr *attr;
- struct mlx5_flow_spec *spec;
- struct mlx5e_tc_flow *flow;
- int err;
-
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- e->encap_size, e->encap_header,
- MLX5_FLOW_NAMESPACE_FDB);
- if (IS_ERR(e->pkt_reformat)) {
- mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
- PTR_ERR(e->pkt_reformat));
- return;
- }
- e->flags |= MLX5_ENCAP_ENTRY_VALID;
- mlx5e_rep_queue_neigh_stats_work(priv);
-
- list_for_each_entry(flow, flow_list, tmp_list) {
- bool all_flow_encaps_valid = true;
- int i;
-
- if (!mlx5e_is_offloaded_flow(flow))
- continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
-
- esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
- esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
- /* Flow can be associated with multiple encap entries.
- * Before offloading the flow verify that all of them have
- * a valid neighbour.
- */
- for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
- if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
- continue;
- if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
- all_flow_encaps_valid = false;
- break;
- }
- }
- /* Do not offload flows with unresolved neighbors */
- if (!all_flow_encaps_valid)
- continue;
- /* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
- err);
- continue;
- }
-
- mlx5e_tc_unoffload_from_slow_path(esw, flow);
- flow->rule[0] = rule;
- /* was unset when slow path rule removed */
- flow_flag_set(flow, OFFLOADED);
- }
-}
-
-void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
- struct mlx5e_encap_entry *e,
- struct list_head *flow_list)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *esw_attr;
- struct mlx5_flow_handle *rule;
- struct mlx5_flow_attr *attr;
- struct mlx5_flow_spec *spec;
- struct mlx5e_tc_flow *flow;
- int err;
-
- list_for_each_entry(flow, flow_list, tmp_list) {
- if (!mlx5e_is_offloaded_flow(flow))
- continue;
- attr = flow->attr;
- esw_attr = attr->esw_attr;
- spec = &attr->parse_attr->spec;
-
- /* update from encap rule to slow path rule */
- rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
- /* mark the flow's encap dest as non-valid */
- esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
-
- if (IS_ERR(rule)) {
- err = PTR_ERR(rule);
- mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
- err);
- continue;
- }
-
- mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
- flow->rule[0] = rule;
- /* was unset when fast path rule removed */
- flow_flag_set(flow, OFFLOADED);
- }
-
- /* we know that the encap is valid */
- e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
- mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
-}
-
-static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
+struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
return flow->attr->counter;
}
-/* Takes reference to all flows attached to encap and adds the flows to
- * flow_list using 'tmp_list' list_head in mlx5e_tc_flow.
- */
-void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list)
-{
- struct encap_flow_item *efi;
- struct mlx5e_tc_flow *flow;
-
- list_for_each_entry(efi, &e->flows, list) {
- flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
- if (IS_ERR(mlx5e_flow_get(flow)))
- continue;
- wait_for_completion(&flow->init_done);
-
- flow->tmp_efi_index = efi->index;
- list_add(&flow->tmp_list, flow_list);
- }
-}
-
/* Iterate over tmp_list of flows attached to flow_list head. */
-void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
+void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
struct mlx5e_tc_flow *flow, *tmp;
@@ -1616,222 +1484,6 @@ void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_l
mlx5e_flow_put(priv, flow);
}
-static struct mlx5e_encap_entry *
-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
- struct mlx5e_encap_entry *e)
-{
- struct mlx5e_encap_entry *next = NULL;
-
-retry:
- rcu_read_lock();
-
- /* find encap with non-zero reference counter value */
- for (next = e ?
- list_next_or_null_rcu(&nhe->encap_list,
- &e->encap_list,
- struct mlx5e_encap_entry,
- encap_list) :
- list_first_or_null_rcu(&nhe->encap_list,
- struct mlx5e_encap_entry,
- encap_list);
- next;
- next = list_next_or_null_rcu(&nhe->encap_list,
- &next->encap_list,
- struct mlx5e_encap_entry,
- encap_list))
- if (mlx5e_encap_take(next))
- break;
-
- rcu_read_unlock();
-
- /* release starting encap */
- if (e)
- mlx5e_encap_put(netdev_priv(e->out_dev), e);
- if (!next)
- return next;
-
- /* wait for encap to be fully initialized */
- wait_for_completion(&next->res_ready);
- /* continue searching if encap entry is not in valid state after completion */
- if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
- e = next;
- goto retry;
- }
-
- return next;
-}
-
-void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
-{
- struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
- struct mlx5e_encap_entry *e = NULL;
- struct mlx5e_tc_flow *flow;
- struct mlx5_fc *counter;
- struct neigh_table *tbl;
- bool neigh_used = false;
- struct neighbour *n;
- u64 lastuse;
-
- if (m_neigh->family == AF_INET)
- tbl = &arp_tbl;
-#if IS_ENABLED(CONFIG_IPV6)
- else if (m_neigh->family == AF_INET6)
- tbl = ipv6_stub->nd_tbl;
-#endif
- else
- return;
-
- /* mlx5e_get_next_valid_encap() releases previous encap before returning
- * next one.
- */
- while ((e = mlx5e_get_next_valid_encap(nhe, e)) != NULL) {
- struct mlx5e_priv *priv = netdev_priv(e->out_dev);
- struct encap_flow_item *efi, *tmp;
- struct mlx5_eswitch *esw;
- LIST_HEAD(flow_list);
-
- esw = priv->mdev->priv.eswitch;
- mutex_lock(&esw->offloads.encap_tbl_lock);
- list_for_each_entry_safe(efi, tmp, &e->flows, list) {
- flow = container_of(efi, struct mlx5e_tc_flow,
- encaps[efi->index]);
- if (IS_ERR(mlx5e_flow_get(flow)))
- continue;
- list_add(&flow->tmp_list, &flow_list);
-
- if (mlx5e_is_offloaded_flow(flow)) {
- counter = mlx5e_tc_get_counter(flow);
- lastuse = mlx5_fc_query_lastuse(counter);
- if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
- neigh_used = true;
- break;
- }
- }
- }
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- mlx5e_put_encap_flow_list(priv, &flow_list);
- if (neigh_used) {
- /* release current encap before breaking the loop */
- mlx5e_encap_put(priv, e);
- break;
- }
- }
-
- trace_mlx5e_tc_update_neigh_used_value(nhe, neigh_used);
-
- if (neigh_used) {
- nhe->reported_lastuse = jiffies;
-
- /* find the relevant neigh according to the cached device and
- * dst ip pair
- */
- n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
- if (!n)
- return;
-
- neigh_event_send(n, NULL);
- neigh_release(n);
- }
-}
-
-static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
-{
- WARN_ON(!list_empty(&e->flows));
-
- if (e->compl_result > 0) {
- mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-
- if (e->flags & MLX5_ENCAP_ENTRY_VALID)
- mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
- }
-
- kfree(e->tun_info);
- kfree(e->encap_header);
- kfree_rcu(e, rcu);
-}
-
-static void mlx5e_decap_dealloc(struct mlx5e_priv *priv,
- struct mlx5e_decap_entry *d)
-{
- WARN_ON(!list_empty(&d->flows));
-
- if (!d->compl_result)
- mlx5_packet_reformat_dealloc(priv->mdev, d->pkt_reformat);
-
- kfree_rcu(d, rcu);
-}
-
-void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
- return;
- hash_del_rcu(&e->encap_hlist);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- mlx5e_encap_dealloc(priv, e);
-}
-
-static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- if (!refcount_dec_and_mutex_lock(&d->refcnt, &esw->offloads.decap_tbl_lock))
- return;
- hash_del_rcu(&d->hlist);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
-
- mlx5e_decap_dealloc(priv, d);
-}
-
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow, int out_index)
-{
- struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- /* flow wasn't fully initialized */
- if (!e)
- return;
-
- mutex_lock(&esw->offloads.encap_tbl_lock);
- list_del(&flow->encaps[out_index].list);
- flow->encaps[out_index].e = NULL;
- if (!refcount_dec_and_test(&e->refcnt)) {
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- return;
- }
- hash_del_rcu(&e->encap_hlist);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- mlx5e_encap_dealloc(priv, e);
-}
-
-static void mlx5e_detach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_decap_entry *d = flow->decap_reformat;
-
- if (!d)
- return;
-
- mutex_lock(&esw->offloads.decap_tbl_lock);
- list_del(&flow->l3_to_l2_reformat);
- flow->decap_reformat = NULL;
-
- if (!refcount_dec_and_test(&d->refcnt)) {
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- return;
- }
- hash_del_rcu(&d->hlist);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
-
- mlx5e_decap_dealloc(priv, d);
-}
-
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
@@ -2089,6 +1741,29 @@ void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
}
}
+u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
+{
+ void *headers_v;
+ u16 ethertype;
+ u8 ip_version;
+
+ if (outer)
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+ else
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
+
+ ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
+ /* If ip_version is not set in the match, derive it from the ethertype */
+ if (!ip_version) {
+ ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+ if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
+ ip_version = 4;
+ else if (ethertype == ETH_P_IPV6)
+ ip_version = 6;
+ }
+ return ip_version;
+}
+
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
@@ -2097,6 +1772,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
u8 *match_level,
bool *match_inner)
{
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
bool needs_mapping, sets_mapping;
@@ -2134,6 +1810,31 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
*/
if (!netif_is_bareudp(filter_dev))
flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ err = mlx5e_tc_set_attr_rx_tun(flow, spec);
+ if (err)
+ return err;
+ } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ struct mlx5_flow_spec *tmp_spec;
+
+ tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
+ if (!tmp_spec) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
+ netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
+ return -ENOMEM;
+ }
+ memcpy(tmp_spec, spec, sizeof(*tmp_spec));
+
+ err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
+ if (err) {
+ kvfree(tmp_spec);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
+ netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
+ return err;
+ }
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ kvfree(tmp_spec);
+ if (err)
+ return err;
}
if (!needs_mapping && !sets_mapping)
@@ -3582,35 +3283,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return 0;
}
-struct encap_key {
- const struct ip_tunnel_key *ip_tun_key;
- struct mlx5e_tc_tunnel *tc_tunnel;
-};
-
-static inline int cmp_encap_info(struct encap_key *a,
- struct encap_key *b)
-{
- return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
- a->tc_tunnel->tunnel_type != b->tc_tunnel->tunnel_type;
-}
-
-static inline int cmp_decap_info(struct mlx5e_decap_key *a,
- struct mlx5e_decap_key *b)
-{
- return memcmp(&a->key, &b->key, sizeof(b->key));
-}
-
-static inline int hash_encap_info(struct encap_key *key)
-{
- return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
- key->tc_tunnel->tunnel_type);
-}
-
-static inline int hash_decap_info(struct mlx5e_decap_key *key)
-{
- return jhash(&key->key, sizeof(key->key), 0);
-}
-
static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
struct net_device *peer_netdev)
{
@@ -3624,277 +3296,6 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
same_hw_devs(priv, peer_priv));
}
-bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
-{
- return refcount_inc_not_zero(&e->refcnt);
-}
-
-static bool mlx5e_decap_take(struct mlx5e_decap_entry *e)
-{
- return refcount_inc_not_zero(&e->refcnt);
-}
-
-static struct mlx5e_encap_entry *
-mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
- uintptr_t hash_key)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_encap_entry *e;
- struct encap_key e_key;
-
- hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
- encap_hlist, hash_key) {
- e_key.ip_tun_key = &e->tun_info->key;
- e_key.tc_tunnel = e->tunnel;
- if (!cmp_encap_info(&e_key, key) &&
- mlx5e_encap_take(e))
- return e;
- }
-
- return NULL;
-}
-
-static struct mlx5e_decap_entry *
-mlx5e_decap_get(struct mlx5e_priv *priv, struct mlx5e_decap_key *key,
- uintptr_t hash_key)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_decap_key r_key;
- struct mlx5e_decap_entry *e;
-
- hash_for_each_possible_rcu(esw->offloads.decap_tbl, e,
- hlist, hash_key) {
- r_key = e->key;
- if (!cmp_decap_info(&r_key, key) &&
- mlx5e_decap_take(e))
- return e;
- }
- return NULL;
-}
-
-static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
-{
- size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
-
- return kmemdup(tun_info, tun_size, GFP_KERNEL);
-}
-
-static bool is_duplicated_encap_entry(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- int out_index,
- struct mlx5e_encap_entry *e,
- struct netlink_ext_ack *extack)
-{
- int i;
-
- for (i = 0; i < out_index; i++) {
- if (flow->encaps[i].e != e)
- continue;
- NL_SET_ERR_MSG_MOD(extack, "can't duplicate encap action");
- netdev_err(priv->netdev, "can't duplicate encap action\n");
- return true;
- }
-
- return false;
-}
-
-static int mlx5e_attach_encap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct net_device *mirred_dev,
- int out_index,
- struct netlink_ext_ack *extack,
- struct net_device **encap_dev,
- bool *encap_valid)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5_flow_attr *attr = flow->attr;
- const struct ip_tunnel_info *tun_info;
- struct encap_key key;
- struct mlx5e_encap_entry *e;
- unsigned short family;
- uintptr_t hash_key;
- int err = 0;
-
- parse_attr = attr->parse_attr;
- tun_info = parse_attr->tun_info[out_index];
- family = ip_tunnel_info_af(tun_info);
- key.ip_tun_key = &tun_info->key;
- key.tc_tunnel = mlx5e_get_tc_tun(mirred_dev);
- if (!key.tc_tunnel) {
- NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel");
- return -EOPNOTSUPP;
- }
-
- hash_key = hash_encap_info(&key);
-
- mutex_lock(&esw->offloads.encap_tbl_lock);
- e = mlx5e_encap_get(priv, &key, hash_key);
-
- /* must verify if encap is valid or not */
- if (e) {
- /* Check that entry was not already attached to this flow */
- if (is_duplicated_encap_entry(priv, flow, out_index, e, extack)) {
- err = -EOPNOTSUPP;
- goto out_err;
- }
-
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- wait_for_completion(&e->res_ready);
-
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
- if (e->compl_result < 0) {
- err = -EREMOTEIO;
- goto out_err;
- }
- goto attach_flow;
- }
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e) {
- err = -ENOMEM;
- goto out_err;
- }
-
- refcount_set(&e->refcnt, 1);
- init_completion(&e->res_ready);
-
- tun_info = dup_tun_info(tun_info);
- if (!tun_info) {
- err = -ENOMEM;
- goto out_err_init;
- }
- e->tun_info = tun_info;
- err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
- if (err)
- goto out_err_init;
-
- INIT_LIST_HEAD(&e->flows);
- hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- if (family == AF_INET)
- err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
- else if (family == AF_INET6)
- err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
-
- /* Protect against concurrent neigh update. */
- mutex_lock(&esw->offloads.encap_tbl_lock);
- complete_all(&e->res_ready);
- if (err) {
- e->compl_result = err;
- goto out_err;
- }
- e->compl_result = 1;
-
-attach_flow:
- flow->encaps[out_index].e = e;
- list_add(&flow->encaps[out_index].list, &e->flows);
- flow->encaps[out_index].index = out_index;
- *encap_dev = e->out_dev;
- if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
- attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
- *encap_valid = true;
- } else {
- *encap_valid = false;
- }
- mutex_unlock(&esw->offloads.encap_tbl_lock);
-
- return err;
-
-out_err:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- if (e)
- mlx5e_encap_put(priv, e);
- return err;
-
-out_err_init:
- mutex_unlock(&esw->offloads.encap_tbl_lock);
- kfree(tun_info);
- kfree(e);
- return err;
-}
-
-static int mlx5e_attach_decap(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
- struct mlx5e_decap_entry *d;
- struct mlx5e_decap_key key;
- uintptr_t hash_key;
- int err = 0;
-
- parse_attr = flow->attr->parse_attr;
- if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
- NL_SET_ERR_MSG_MOD(extack,
- "encap header larger than max supported");
- return -EOPNOTSUPP;
- }
-
- key.key = parse_attr->eth;
- hash_key = hash_decap_info(&key);
- mutex_lock(&esw->offloads.decap_tbl_lock);
- d = mlx5e_decap_get(priv, &key, hash_key);
- if (d) {
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- wait_for_completion(&d->res_ready);
- mutex_lock(&esw->offloads.decap_tbl_lock);
- if (d->compl_result) {
- err = -EREMOTEIO;
- goto out_free;
- }
- goto found;
- }
-
- d = kzalloc(sizeof(*d), GFP_KERNEL);
- if (!d) {
- err = -ENOMEM;
- goto out_err;
- }
-
- d->key = key;
- refcount_set(&d->refcnt, 1);
- init_completion(&d->res_ready);
- INIT_LIST_HEAD(&d->flows);
- hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
-
- d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
- sizeof(parse_attr->eth),
- &parse_attr->eth,
- MLX5_FLOW_NAMESPACE_FDB);
- if (IS_ERR(d->pkt_reformat)) {
- err = PTR_ERR(d->pkt_reformat);
- d->compl_result = err;
- }
- mutex_lock(&esw->offloads.decap_tbl_lock);
- complete_all(&d->res_ready);
- if (err)
- goto out_free;
-
-found:
- flow->decap_reformat = d;
- attr->decap_pkt_reformat = d->pkt_reformat;
- list_add(&flow->l3_to_l2_reformat, &d->flows);
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- return 0;
-
-out_free:
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- mlx5e_decap_put(priv, d);
- return err;
-
-out_err:
- mutex_unlock(&esw->offloads.decap_tbl_lock);
- return err;
-}
-
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
@@ -4247,7 +3648,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (encap) {
parse_attr->mirred_ifindex[esw_attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info);
+ parse_attr->tun_info[esw_attr->out_count] =
+ mlx5e_dup_tun_info(info);
if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
encap = false;
@@ -4384,6 +3786,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
}
}
+ /* Always set the IP version; it is needed for indirect table handling */
+ attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
+
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
/* For prio tag mode, replace vlan pop with rewrite vlan prio
@@ -4664,7 +4069,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
return flow;
err_free:
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
return ERR_PTR(err);
@@ -4809,6 +4213,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
return 0;
err_free:
+ flow_flag_set(flow, FAILED);
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
mlx5e_flow_put(priv, flow);
out:
@@ -5040,7 +4445,7 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
*/
if (rate) {
rate = (rate * BITS_PER_BYTE) + 500000;
- rate_mbps = max_t(u32, do_div(rate, 1000000), 1);
+ rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
}
err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
@@ -5221,6 +4626,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err)
return err;
+ lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
+
if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
@@ -5328,7 +4735,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_mapping = mapping;
- mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK, true);
+ /* 0xFFF is reserved for the stack devices' slow path table mark */
+ mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5339,8 +4747,18 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
if (err)
goto err_ht_init;
- return err;
+ lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+
+ uplink_priv->encap = mlx5e_tc_tun_init(priv);
+ if (IS_ERR(uplink_priv->encap)) {
+ err = PTR_ERR(uplink_priv->encap);
+ goto err_register_fib_notifier;
+ }
+ return 0;
+
+err_register_fib_notifier:
+ rhashtable_destroy(tc_ht);
err_ht_init:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
@@ -5357,10 +4775,11 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
struct mlx5_rep_uplink_priv *uplink_priv;
- rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
-
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+ rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
+ mlx5e_tc_tun_cleanup(uplink_priv->encap);
+
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
@@ -5460,7 +4879,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
tc_skb_ext->chain = chain;
zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
- ZONE_RESTORE_MAX;
+ ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
zone_restore_id))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 4a2ce241522e..89003ae7775a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -37,6 +37,8 @@
#include "en.h"
#include "eswitch.h"
#include "en/tc_ct.h"
+#include "en/tc_tun.h"
+#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -76,6 +78,7 @@ struct mlx5_flow_attr {
struct mlx5_flow_table *dest_ft;
u8 inner_match_level;
u8 outer_match_level;
+ u8 ip_version;
u32 flags;
union {
struct mlx5_esw_flow_attr esw_attr[0];
@@ -83,6 +86,19 @@ struct mlx5_flow_attr {
};
};
+struct mlx5_rx_tun_attr {
+ u16 decap_vport;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } src_ip; /* Valid if decap_vport is not zero */
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } dst_ip; /* Valid if decap_vport is not zero */
+ u32 vni;
+};
+
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
@@ -158,7 +174,7 @@ bool mlx5e_encap_take(struct mlx5e_encap_entry *e);
void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e);
void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *flow_list);
-void mlx5e_put_encap_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
+void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
struct mlx5e_neigh_hash_entry;
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
@@ -167,6 +183,7 @@ void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
enum mlx5e_tc_attr_to_reg {
CHAIN_TO_REG,
+ VPORT_TO_REG,
TUNNEL_TO_REG,
CTSTATE_TO_REG,
ZONE_TO_REG,
@@ -197,6 +214,11 @@ int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
enum mlx5e_tc_attr_to_reg type,
u32 data);
+void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5e_tc_attr_to_reg type,
+ int act_id, u32 data);
+
void mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
u32 data,
@@ -207,6 +229,16 @@ void mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
u32 *data,
u32 *mask);
+int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
+ struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
+ enum mlx5e_tc_attr_to_reg type,
+ u32 data);
+
+int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow);
+
int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
int namespace,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
@@ -242,6 +274,10 @@ mlx5_tc_rule_delete(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr);
+bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev);
+int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
+ u16 *vport);
+
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -283,7 +319,7 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
reg_b = be32_to_cpu(cqe->ft_metadata);
- if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ZONE_RESTORE_BITS))
+ if (reg_b >> (MLX5E_TC_TABLE_CHAIN_TAG_BITS + ESW_ZONE_ID_BITS))
return false;
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 61ed671fe741..bdbffe484fce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -39,7 +39,6 @@
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
-#include "lib/clock.h"
#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
@@ -106,28 +105,53 @@ return_txq:
return priv->port_ptp_tc2realtxq[up];
}
+static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
+ u16 htb_maj_id)
+{
+ u16 classid;
+
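+ /* Use the class encoded in skb->priority when it targets this HTB
+ * qdisc; otherwise fall back to the configured default class.
+ */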
+ if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
+ classid = TC_H_MIN(skb->priority);
+ else
+ classid = READ_ONCE(priv->htb.defcls);
+
+ if (!classid)
+ return 0;
+
+ return mlx5e_get_txq_by_classid(priv, classid);
+}
+
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int num_tc_x_num_ch;
int txq_ix;
int up = 0;
int ch_ix;
- if (unlikely(priv->channels.port_ptp)) {
- int num_tc_x_num_ch;
+ /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
+ num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+ if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
+ /* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
+ u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- mlx5e_use_ptpsq(skb))
- return mlx5e_select_ptpsq(dev, skb);
+ if (unlikely(htb_maj_id)) {
+ txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
+ if (txq_ix > 0)
+ return txq_ix;
+ }
- /* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
- num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
+ if (unlikely(priv->channels.port_ptp))
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ mlx5e_use_ptpsq(skb))
+ return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
- /* Fix netdev_pick_tx() not to choose ptp_channel txqs.
+ /* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
* If they are selected, switch to regular queues.
- * Driver to select these queues only at mlx5e_select_ptpsq().
+ * Driver to select these queues only at mlx5e_select_ptpsq()
+ * and mlx5e_select_htb_queue().
*/
if (unlikely(txq_ix >= num_tc_x_num_ch))
txq_ix %= num_tc_x_num_ch;
@@ -241,9 +265,8 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
-
} else
sq->stats->csum_none++;
}
@@ -703,6 +726,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
u16 pi;
sq = priv->txq2sq[skb_get_queue_mapping(skb)];
+ if (unlikely(!sq)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
/* May send SKBs and WQEs. */
if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
@@ -774,7 +801,7 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct skb_shared_hwtstamps hwts = {};
u64 ts = get_cqe_ts(cqe);
- hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
+ hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
if (sq->ptpsq)
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
hwts.hwtstamp, sq->ptpsq->cq_stats);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a3cfe06d5116..d54da3797c30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -115,17 +115,21 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
napi);
struct mlx5e_ch_stats *ch_stats = c->stats;
struct mlx5e_xdpsq *xsksq = &c->xsksq;
+ struct mlx5e_txqsq __rcu **qos_sqs;
struct mlx5e_rq *xskrq = &c->xskrq;
struct mlx5e_rq *rq = &c->rq;
bool aff_change = false;
bool busy_xsk = false;
bool busy = false;
int work_done = 0;
+ u16 qos_sqs_size;
bool xsk_open;
int i;
rcu_read_lock();
+ qos_sqs = rcu_dereference(c->qos_sqs);
+
xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
ch_stats->poll++;
@@ -133,6 +137,18 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
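+ /* Also poll the completion queues of the HTB (QoS) SQs, if any. */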
+ if (unlikely(qos_sqs)) {
+ smp_rmb(); /* Pairs with mlx5e_qos_alloc_queues. */
+ qos_sqs_size = READ_ONCE(c->qos_sqs_size);
+
+ for (i = 0; i < qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
+
+ if (sq)
+ busy |= mlx5e_poll_tx_cq(&sq->cq, budget);
+ }
+ }
+
busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
if (c->xdp)
@@ -186,6 +202,16 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
mlx5e_handle_tx_dim(&c->sq[i]);
mlx5e_cq_arm(&c->sq[i].cq);
}
+ if (unlikely(qos_sqs)) {
+ for (i = 0; i < qos_sqs_size; i++) {
+ struct mlx5e_txqsq *sq = rcu_dereference(qos_sqs[i]);
+
+ if (sq) {
+ mlx5e_handle_tx_dim(sq);
+ mlx5e_cq_arm(&sq->cq);
+ }
+ }
+ }
mlx5e_handle_rx_dim(rq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index fc0afa03d407..174dfbc996c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -467,7 +467,7 @@ int mlx5_eq_table_init(struct mlx5_core_dev *dev)
for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++)
ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]);
- eq_table->irq_table = dev->priv.irq_table;
+ eq_table->irq_table = mlx5_irq_table_get(dev);
return 0;
}
@@ -595,6 +595,9 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
async_event_mask |=
(1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED);
+ if (MLX5_CAP_GEN_MAX(dev, vhca_state))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_VHCA_STATE_CHANGE);
+
mask[0] = async_event_mask;
if (MLX5_CAP_GEN(dev, event_cap))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 4c74e2690d57..26b37a0f8762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -150,7 +150,7 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
{
- return mlx5_eswitch_is_vf_vport(esw, vport_num);
+ return mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_esw_is_sf_vport(esw, vport_num);
}
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index ffff11baa3d0..cb1e181f4c6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -122,3 +122,44 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
vport = mlx5_eswitch_get_vport(esw, vport_num);
return vport->dl_port;
}
+
+int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct netdev_phys_item_id ppid = {};
+ unsigned int dl_port_index;
+ struct mlx5_vport *vport;
+ struct devlink *devlink;
+ u16 pfnum;
+ int err;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
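+ /* Expose the SF as a PCI SF devlink port of this PF, indexed by its
+ * eswitch vport number.
+ */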
+ pfnum = PCI_FUNC(dev->pdev->devfn);
+ mlx5_esw_get_port_parent_id(dev, &ppid);
+ memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_sf_set(dl_port, 0, pfnum, sfnum);
+ devlink = priv_to_devlink(dev);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
+ err = devlink_port_register(devlink, dl_port, dl_port_index);
+ if (err)
+ return err;
+
+ vport->dl_port = dl_port;
+ return 0;
+}
+
+void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+ devlink_port_unregister(vport->dl_port);
+ vport->dl_port = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
new file mode 100644
index 000000000000..6f6772bf61a2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/etherdevice.h>
+#include <linux/idr.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+#include "fs_core.h"
+#include "esw/indir_table.h"
+#include "lib/fs_chains.h"
+
+#define MLX5_ESW_INDIR_TABLE_SIZE 128
+#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+#define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)
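+
+/* Table layout: entries 0..MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX hold per-tunnel
+ * recirculation rules; the last entry is the catch-all forward-to-vport rule.
+ */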
+
+struct mlx5_esw_indir_table_rule {
+ struct list_head list;
+ struct mlx5_flow_handle *handle;
+ union {
+ __be32 v4;
+ struct in6_addr v6;
+ } dst_ip;
+ u32 vni;
+ struct mlx5_modify_hdr *mh;
+ refcount_t refcnt;
+};
+
+struct mlx5_esw_indir_table_entry {
+ struct hlist_node hlist;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *recirc_grp;
+ struct mlx5_flow_group *fwd_grp;
+ struct mlx5_flow_handle *fwd_rule;
+ struct list_head recirc_rules;
+ int recirc_cnt;
+ int fwd_ref;
+
+ u16 vport;
+ u8 ip_version;
+};
+
+struct mlx5_esw_indir_table {
+ struct mutex lock; /* protects table */
+ DECLARE_HASHTABLE(table, 8);
+};
+
+struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void)
+{
+ struct mlx5_esw_indir_table *indir = kvzalloc(sizeof(*indir), GFP_KERNEL);
+
+ if (!indir)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&indir->lock);
+ hash_init(indir->table);
+ return indir;
+}
+
+void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
+{
+ mutex_destroy(&indir->lock);
+ kvfree(indir);
+}
+
+bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport_num,
+ struct mlx5_core_dev *dest_mdev)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ /* Use indirect table for all IP traffic from UL to VF with vport
+ * destination when source rewrite flag is set.
+ */
+ return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
+ mlx5_eswitch_is_vf_vport(esw, vport_num) &&
+ esw->dev == dest_mdev &&
+ attr->ip_version &&
+ attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+}
+
+u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
+}
+
+static struct mlx5_esw_indir_table_rule *
+mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
+ struct mlx5_esw_flow_attr *attr)
+{
+ struct mlx5_esw_indir_table_rule *rule;
+
+ list_for_each_entry(rule, &e->recirc_rules, list)
+ if (rule->vni == attr->rx_tun_attr->vni &&
+ !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
+ sizeof(attr->rx_tun_attr->dst_ip)))
+ goto found;
+ return NULL;
+
+found:
+ refcount_inc(&rule->refcnt);
+ return rule;
+}
+
+static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ struct mlx5e_tc_mod_hdr_acts mod_acts = {};
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_esw_indir_table_rule *rule;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ int err = 0;
+ u32 data;
+
+ rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
+ if (rule)
+ return 0;
+
+ if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
+ return -EINVAL;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return -ENOMEM;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS |
+ MLX5_MATCH_MISC_PARAMETERS_2;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ outer_headers.ip_version, 0xf);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
+ attr->ip_version);
+ } else if (attr->ip_version) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
+ (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_ethertype;
+ }
+
+ if (attr->ip_version == 4) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ MLX5_SET(fte_match_param, rule_spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+ ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
+ } else if (attr->ip_version == 6) {
+ int len = sizeof(struct in6_addr);
+
+ memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, len);
+ memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &esw_attr->rx_tun_attr->dst_ip.v6, len);
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ misc_parameters.vxlan_vni);
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
+ MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
+ MLX5_VPORT_UPLINK));
+
+ /* Modify flow source to recirculate packet */
+ data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ VPORT_TO_REG, data);
+ if (err)
+ goto err_mod_hdr_regc0;
+
+ err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
+ TUNNEL_TO_REG, ESW_TUN_SLOW_TABLE_GOTO_VPORT);
+ if (err)
+ goto err_mod_hdr_regc1;
+
+ flow_act.modify_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
+ mod_acts.num_actions, mod_acts.actions);
+ if (IS_ERR(flow_act.modify_hdr)) {
+ err = PTR_ERR(flow_act.modify_hdr);
+ goto err_mod_hdr_alloc;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
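+ /* Recirculated packets restart processing from the root chain table
+ * (chain 0, prio 1, level 0).
+ */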
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(dest.ft)) {
+ err = PTR_ERR(dest.ft);
+ goto err_table;
+ }
+ handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ goto err_handle;
+ }
+
+ dealloc_mod_hdr_actions(&mod_acts);
+ rule->handle = handle;
+ rule->vni = esw_attr->rx_tun_attr->vni;
+ rule->mh = flow_act.modify_hdr;
+ memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+ sizeof(esw_attr->rx_tun_attr->dst_ip));
+ refcount_set(&rule->refcnt, 1);
+ list_add(&rule->list, &e->recirc_rules);
+ e->recirc_cnt++;
+ goto out;
+
+err_handle:
+ mlx5_chains_put_table(chains, 0, 1, 0);
+err_table:
+ mlx5_modify_header_dealloc(esw->dev, flow_act.modify_hdr);
+err_mod_hdr_alloc:
+err_mod_hdr_regc1:
+ dealloc_mod_hdr_actions(&mod_acts);
+err_mod_hdr_regc0:
+err_ethertype:
+ kfree(rule);
+out:
+ kvfree(rule_spec);
+ return err;
+}
+
+static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ struct mlx5_esw_indir_table_rule *rule;
+
+ list_for_each_entry(rule, &e->recirc_rules, list)
+ if (rule->vni == esw_attr->rx_tun_attr->vni &&
+ !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
+ sizeof(esw_attr->rx_tun_attr->dst_ip)))
+ goto found;
+
+ return;
+
+found:
+ if (!refcount_dec_and_test(&rule->refcnt))
+ return;
+
+ mlx5_del_flow_rules(rule->handle);
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_modify_header_dealloc(esw->dev, rule->mh);
+ list_del(&rule->list);
+ kfree(rule);
+ e->recirc_cnt--;
+}
+
+static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
+ MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
+ else
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
+
+ if (attr->ip_version == 4) {
+ MLX5_SET_TO_ONES(fte_match_param, match,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else if (attr->ip_version == 6) {
+ memset(MLX5_ADDR_OF(fte_match_param, match,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, sizeof(struct in6_addr));
+ } else {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
+ e->recirc_grp = mlx5_create_flow_group(e->ft, in);
+ if (IS_ERR(e->recirc_grp)) {
+ err = PTR_ERR(e->recirc_grp);
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&e->recirc_rules);
+ e->recirc_cnt = 0;
+
+out:
+ kvfree(in);
+ return err;
+}
+
+static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
+ struct mlx5_esw_indir_table_entry *e)
+{
+ int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ /* Hold one entry */
+ MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
+ e->fwd_grp = mlx5_create_flow_group(e->ft, in);
+ if (IS_ERR(e->fwd_grp)) {
+ err = PTR_ERR(e->fwd_grp);
+ goto err_out;
+ }
+
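+ /* Catch-all entry: traffic that did not hit a recirculation rule is
+ * forwarded directly to the destination vport.
+ */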
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = e->vport;
+ dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
+ e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(e->fwd_rule)) {
+ mlx5_destroy_flow_group(e->fwd_grp);
+ err = PTR_ERR(e->fwd_rule);
+ }
+
+err_out:
+ kvfree(spec);
+ kvfree(in);
+ return err;
+}
+
+static struct mlx5_esw_indir_table_entry *
+mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec, u16 vport, bool decap)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_esw_indir_table_entry *e;
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
+ root_ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns)
+ return ERR_PTR(-ENOENT);
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+
+ ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.max_fte = MLX5_ESW_INDIR_TABLE_SIZE;
+ ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.level = 1;
+
+ ft = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto tbl_err;
+ }
+ e->ft = ft;
+ e->vport = vport;
+ e->ip_version = attr->ip_version;
+ e->fwd_ref = !decap;
+
+ err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
+ if (err)
+ goto recirc_grp_err;
+
+ if (decap) {
+ err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ if (err)
+ goto recirc_rule_err;
+ }
+
+ err = mlx5_create_indir_fwd_group(esw, e);
+ if (err)
+ goto fwd_grp_err;
+
+ hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
+ vport << 16 | attr->ip_version);
+
+ return e;
+
+fwd_grp_err:
+ if (decap)
+ mlx5_esw_indir_table_rule_put(esw, attr, e);
+recirc_rule_err:
+ mlx5_destroy_flow_group(e->recirc_grp);
+recirc_grp_err:
+ mlx5_destroy_flow_table(e->ft);
+tbl_err:
+ kfree(e);
+ return ERR_PTR(err);
+}
+
+static struct mlx5_esw_indir_table_entry *
+mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
+{
+ struct mlx5_esw_indir_table_entry *e;
+ u32 key = vport << 16 | ip_version;
+
+ hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
+ if (e->vport == vport && e->ip_version == ip_version)
+ return e;
+
+ return NULL;
+}
+
+struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ u16 vport, bool decap)
+{
+ struct mlx5_esw_indir_table_entry *e;
+ int err;
+
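+ /* A non-decap user only takes a reference on the forward rule; a decap
+ * user also creates (or references) a recirculation rule.
+ */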
+ mutex_lock(&esw->fdb_table.offloads.indir->lock);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ if (e) {
+ if (!decap) {
+ e->fwd_ref++;
+ } else {
+ err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ if (err)
+ goto out_err;
+ }
+ } else {
+ e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
+ if (IS_ERR(e)) {
+ err = PTR_ERR(e);
+ esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
+ goto out_err;
+ }
+ }
+ mutex_unlock(&esw->fdb_table.offloads.indir->lock);
+ return e->ft;
+
+out_err:
+ mutex_unlock(&esw->fdb_table.offloads.indir->lock);
+ return ERR_PTR(err);
+}
+
+void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport, bool decap)
+{
+ struct mlx5_esw_indir_table_entry *e;
+
+ mutex_lock(&esw->fdb_table.offloads.indir->lock);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ if (!e)
+ goto out;
+
+ if (!decap)
+ e->fwd_ref--;
+ else
+ mlx5_esw_indir_table_rule_put(esw, attr, e);
+
+ if (e->fwd_ref || e->recirc_cnt)
+ goto out;
+
+ hash_del(&e->hlist);
+ mlx5_destroy_flow_group(e->recirc_grp);
+ mlx5_del_flow_rules(e->fwd_rule);
+ mlx5_destroy_flow_group(e->fwd_grp);
+ mlx5_destroy_flow_table(e->ft);
+ kfree(e);
+out:
+ mutex_unlock(&esw->fdb_table.offloads.indir->lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
new file mode 100644
index 000000000000..cb9eafd1b4ee
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_FT_H__
+#define __MLX5_ESW_FT_H__
+
+#ifdef CONFIG_MLX5_CLS_ACT
+
+struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void);
+void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
+
+struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ u16 vport, bool decap);
+void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport, bool decap);
+
+bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport_num,
+ struct mlx5_core_dev *dest_mdev);
+
+u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr);
+
+#else
+/* indir API stubs */
+static inline struct mlx5_esw_indir_table *
+mlx5_esw_indir_table_init(void)
+{
+ return NULL;
+}
+
+static inline void
+mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
+{
+}
+
+static inline struct mlx5_flow_table *
+mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ u16 vport, bool decap)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void
+mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport, bool decap)
+{
+}
+
+static inline bool
+mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ u16 vport_num,
+ struct mlx5_core_dev *dest_mdev)
+{
+ return false;
+}
+
+static inline u16
+mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
+{
+ return 0;
+}
+#endif
+
+#endif /* __MLX5_ESW_FT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index da901e364656..aba17835465b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1042,8 +1042,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
void *vport_elem;
int err = 0;
- if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
- !MLX5_CAP_QOS(dev, esw_scheduling))
+ if (!esw->qos.enabled)
return 0;
if (vport->qos.enabled)
@@ -1273,8 +1272,8 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
esw_vport_cleanup_acl(esw, vport);
}
-static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
- enum mlx5_eswitch_vport_event enabled_events)
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
int ret;
@@ -1301,6 +1300,13 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
(!vport_num && mlx5_core_is_ecpf(esw->dev)))
vport->info.trusted = true;
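+ /* Record the vhca_id -> vport_num mapping for non-manager vports when
+ * this device manages their vhca resources.
+ */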
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ ret = mlx5_esw_vport_vhca_id_set(esw, vport_num);
+ if (ret)
+ goto err_vhca_mapping;
+ }
+
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
@@ -1308,9 +1314,14 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
done:
mutex_unlock(&esw->state_lock);
return ret;
+
+err_vhca_mapping:
+ esw_vport_cleanup(esw, vport);
+ mutex_unlock(&esw->state_lock);
+ return ret;
}
-static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport;
@@ -1326,6 +1337,11 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+
+ if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+ MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ mlx5_esw_vport_vhca_id_clear(esw, vport_num);
+
/* We don't assume VFs will cleanup after themselves.
* Calling vport change handler while vport is disabled will cleanup
* the vport resources.
@@ -1366,9 +1382,15 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
+ u16 max_sf_vports;
u32 *out;
int err;
+ max_sf_vports = mlx5_sf_max_functions(dev);
+ /* The device interface is an array of 64-bit words */
+ if (max_sf_vports)
+ outlen += DIV_ROUND_UP(max_sf_vports, BITS_PER_TYPE(__be64)) * sizeof(__be64);
+
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return ERR_PTR(-ENOMEM);
@@ -1376,7 +1398,7 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
MLX5_SET(query_esw_functions_in, in, opcode,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);
- err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
if (!err)
return out;
@@ -1426,7 +1448,7 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
{
int err;
- err = esw_enable_vport(esw, vport_num, enabled_events);
+ err = mlx5_esw_vport_enable(esw, vport_num, enabled_events);
if (err)
return err;
@@ -1437,14 +1459,14 @@ int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
return err;
err_rep:
- esw_disable_vport(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
return err;
}
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
esw_offloads_unload_rep(esw, vport_num);
- esw_disable_vport(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
}
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
@@ -1594,6 +1616,15 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
kvfree(out);
}
+static void mlx5_esw_mode_change_notify(struct mlx5_eswitch *esw, u16 mode)
+{
+ struct mlx5_esw_event_info info = {};
+
+ info.new_mode = mode;
+
+ blocking_notifier_call_chain(&esw->n_head, 0, &info);
+}
+
/**
* mlx5_eswitch_enable_locked - Enable eswitch
* @esw: Pointer to eswitch
@@ -1654,6 +1685,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
+ mlx5_esw_mode_change_notify(esw, mode);
+
return 0;
abort:
@@ -1710,6 +1743,11 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
+ /* Notify eswitch users that it is exiting from the current mode so
+ * that they can do the necessary cleanup before the eswitch is disabled.
+ */
+ mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_NONE);
+
mlx5_eswitch_event_handlers_unregister(esw);
if (esw->mode == MLX5_ESWITCH_LEGACY)
@@ -1794,6 +1832,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
atomic64_set(&esw->offloads.num_flows, 0);
ida_init(&esw->offloads.vport_metadata_ida);
+ xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC);
mutex_init(&esw->state_lock);
mutex_init(&esw->mode_lock);
@@ -1810,6 +1849,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
dev->priv.eswitch = esw;
+ BLOCKING_INIT_NOTIFIER_HEAD(&esw->n_head);
return 0;
abort:
if (esw->work_queue)
@@ -1832,6 +1872,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
mutex_destroy(&esw->mode_lock);
mutex_destroy(&esw->state_lock);
+ WARN_ON(!xa_empty(&esw->offloads.vhca_map));
+ xa_destroy(&esw->offloads.vhca_map);
ida_destroy(&esw->offloads.vport_metadata_ida);
mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
mutex_destroy(&esw->offloads.encap_tbl_lock);
@@ -1899,7 +1941,8 @@ static bool
is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(esw, vport_num);
+ mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_esw_is_sf_vport(esw, vport_num);
}
int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
@@ -2500,4 +2543,12 @@ bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}
+int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&esw->n_head, nb);
+}
+void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&esw->n_head, nb);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index cf87de94418f..fdf5c8c05c1b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -36,6 +36,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
+#include <linux/xarray.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/eswitch.h>
@@ -43,6 +44,7 @@
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
#include "lib/fs_chains.h"
+#include "sf/sf.h"
#include "en/tc_ct.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -159,6 +161,8 @@ struct mlx5_vport {
struct devlink_port *dl_port;
};
+struct mlx5_esw_indir_table;
+
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -175,9 +179,11 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
+ struct mlx5_flow_group *send_to_vport_meta_grp;
struct mlx5_flow_group *peer_miss_grp;
struct mlx5_flow_handle **peer_miss_rules;
struct mlx5_flow_group *miss_grp;
+ struct mlx5_flow_handle **send_to_vport_meta_rules;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
@@ -189,6 +195,8 @@ struct mlx5_eswitch_fdb {
struct mutex lock;
} vports;
+ struct mlx5_esw_indir_table *indir;
+
} offloads;
};
u32 flags;
@@ -211,6 +219,7 @@ struct mlx5_esw_offload {
struct mod_hdr_tbl mod_hdr;
DECLARE_HASHTABLE(termtbl_tbl, 8);
struct mutex termtbl_mutex; /* protects termtbl hash */
+ struct xarray vhca_map;
const struct mlx5_eswitch_rep_ops *rep_ops[NUM_REP_TYPES];
u8 inline_mode;
atomic64_t num_flows;
@@ -277,6 +286,7 @@ struct mlx5_eswitch {
struct {
u32 large_group_num;
} params;
+ struct blocking_notifier_head n_head;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -385,12 +395,14 @@ enum mlx5_flow_match_level {
enum {
MLX5_ESW_DEST_ENCAP = BIT(0),
MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE = BIT(2),
};
enum {
MLX5_ESW_ATTR_FLAG_VLAN_HANDLED = BIT(0),
MLX5_ESW_ATTR_FLAG_SLOW_PATH = BIT(1),
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
+ MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
};
struct mlx5_esw_flow_attr {
@@ -411,7 +423,9 @@ struct mlx5_esw_flow_attr {
struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
+ int src_port_rewrite_act_id;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
+ struct mlx5_rx_tun_attr *rx_tun_attr;
struct mlx5_pkt_reformat *decap_pkt_reformat;
};
@@ -499,6 +513,40 @@ static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
MLX5_VPORT_PF : MLX5_VPORT_FIRST_VF;
}
+static inline int mlx5_esw_sf_start_idx(const struct mlx5_eswitch *esw)
+{
+ /* PF and VF vport indices run from 0 to max_vfs */
+ return MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
+}
+
+static inline int mlx5_esw_sf_end_idx(const struct mlx5_eswitch *esw)
+{
+ return mlx5_esw_sf_start_idx(esw) + mlx5_sf_max_functions(esw->dev);
+}
+
+static inline int
+mlx5_esw_sf_vport_num_to_index(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num - mlx5_sf_start_function_id(esw->dev) +
+ MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev);
+}
+
+static inline u16
+mlx5_esw_sf_vport_index_to_num(const struct mlx5_eswitch *esw, int idx)
+{
+ return mlx5_sf_start_function_id(esw->dev) + idx -
+ (MLX5_VPORT_PF_PLACEHOLDER + mlx5_core_max_vfs(esw->dev));
+}
+
+static inline bool
+mlx5_esw_is_sf_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_sf_supported(esw->dev) &&
+ vport_num >= mlx5_sf_start_function_id(esw->dev) &&
+ (vport_num < (mlx5_sf_start_function_id(esw->dev) +
+ mlx5_sf_max_functions(esw->dev)));
+}
+
static inline bool mlx5_eswitch_is_funcs_handler(const struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev);
@@ -527,6 +575,10 @@ static inline int mlx5_eswitch_vport_num_to_index(struct mlx5_eswitch *esw,
if (vport_num == MLX5_VPORT_UPLINK)
return mlx5_eswitch_uplink_idx(esw);
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ return mlx5_esw_sf_vport_num_to_index(esw, vport_num);
+
+ /* PF and VF vport indices run from 0 to max_vfs */
return vport_num;
}
@@ -540,6 +592,12 @@ static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
if (index == mlx5_eswitch_uplink_idx(esw))
return MLX5_VPORT_UPLINK;
+ /* SF vport indices come after the VFs and before the ECPF */
+ if (mlx5_sf_supported(esw->dev) &&
+ index > mlx5_core_max_vfs(esw->dev))
+ return mlx5_esw_sf_vport_index_to_num(esw, index);
+
+ /* PF and VF vport indices run from 0 to max_vfs */
return index;
}
@@ -625,6 +683,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
for ((vport) = (nvfs); \
(vport) >= (esw)->first_host_vport; (vport)--)
+#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
+ for ((i) = mlx5_esw_sf_start_idx(esw); \
+ (rep) = &(esw)->offloads.vport_reps[(i)], \
+ (i) < mlx5_esw_sf_end_idx(esw); (i++))
+
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -638,6 +701,10 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
+int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
+ enum mlx5_eswitch_vport_event enabled_events);
+void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport);
@@ -656,6 +723,9 @@ esw_get_max_restore_tag(struct mlx5_eswitch *esw);
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num);
+
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -667,6 +737,30 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum);
+void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum);
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num);
+
+int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num);
+int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num);
+
+/**
+ * mlx5_esw_event_info - Indicates eswitch mode changed/changing.
+ *
+ * @new_mode: New mode of eswitch.
+ */
+struct mlx5_esw_event_info {
+ u16 new_mode;
+};
+
+int mlx5_esw_event_notifier_register(struct mlx5_eswitch *esw, struct notifier_block *n);
+void mlx5_esw_event_notifier_unregister(struct mlx5_eswitch *esw, struct notifier_block *n);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 2f6a0ae20650..94cb0217b4f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -38,7 +38,9 @@
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
+#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
+#include "esw/indir_table.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
@@ -257,7 +259,9 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr,
+ struct mlx5_eswitch *src_esw,
+ u16 vport)
{
void *misc2;
void *misc;
@@ -266,10 +270,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
* VHCA in dual-port RoCE mode, and matching on source vport may fail.
*/
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ vport = mlx5_esw_indir_table_decap_vport(attr);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(attr->in_mdev->priv.eswitch,
- attr->in_rep->vport));
+ mlx5_eswitch_get_vport_metadata_for_match(src_esw,
+ vport));
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
@@ -278,12 +284,12 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
MLX5_SET(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(attr->in_mdev, vhca_id));
+ MLX5_CAP_GEN(src_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -295,6 +301,299 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
}
}
+static int
+esw_setup_decap_indir(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec)
+{
+ struct mlx5_flow_table *ft;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ mlx5_esw_indir_table_decap_vport(attr), true);
+ return PTR_ERR_OR_ZERO(ft);
+}
+
+static void
+esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ mlx5_esw_indir_table_put(esw, attr,
+ mlx5_esw_indir_table_decap_vport(attr),
+ true);
+}
+
+static int
+esw_setup_ft_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = attr->dest_ft;
+
+ if (mlx5_esw_indir_table_decap_vport(attr))
+ return esw_setup_decap_indir(esw, attr, spec);
+ return 0;
+}
+
+static void
+esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains,
+ int i)
+{
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
+}
+
+static int
+esw_setup_chain_dest(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level,
+ int i)
+{
+ struct mlx5_flow_table *ft;
+
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ ft = mlx5_chains_get_table(chains, chain, prio, level);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[i].ft = ft;
+ return 0;
+}
+
+static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
+ int from, int to)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ int i;
+
+ for (i = from; i < to; i++)
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ esw_attr->dests[i].mdev))
+ mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+ false);
+}
+
+static bool
+esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
+{
+ int i;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ return true;
+ return false;
+}
+
+static int
+esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_fs_chains *chains,
+ struct mlx5_flow_attr *attr,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int j, err;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
+ if (err)
+ goto err_setup_chain;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
+ }
+ return 0;
+
+err_setup_chain:
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
+ return err;
+}
+
+static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
+}
+
+static bool
+esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int i;
+
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ esw_attr->dests[i].mdev))
+ return true;
+ return false;
+}
+
+static int
+esw_setup_indir_table(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ bool ignore_flow_lvl,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ int j, err;
+
+ if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
+ return -EOPNOTSUPP;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
+ if (ignore_flow_lvl)
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ esw_attr->dests[j].rep->vport, false);
+ if (IS_ERR(dest[*i].ft)) {
+ err = PTR_ERR(dest[*i].ft);
+ goto err_indir_tbl_get;
+ }
+ }
+
+ if (mlx5_esw_indir_table_decap_vport(attr)) {
+ err = esw_setup_decap_indir(esw, attr, spec);
+ if (err)
+ goto err_indir_tbl_get;
+ }
+
+ return 0;
+
+err_indir_tbl_get:
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
+ return err;
+}
+
+static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+
+ esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
+ esw_cleanup_decap_indir(esw, attr);
+}
+
+static void
+esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
+{
+ mlx5_chains_put_table(chains, chain, prio, level);
+}
+
+static void
+esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int attr_idx, int dest_idx, bool pkt_reformat)
+{
+ dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
+ dest[dest_idx].vport.vhca_id =
+ MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
+ if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
+ dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
+ if (pkt_reformat) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+ dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+ dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
+ }
+}
+
+static int
+esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
+ int i)
+{
+ int j;
+
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
+ esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
+ return i;
+}
+
+static int
+esw_setup_dests(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr,
+ struct mlx5_flow_spec *spec,
+ int *i)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ int err = 0;
+
+ if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
+ MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
+ mlx5_eswitch_vport_match_metadata_enabled(esw))
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
+
+ if (attr->dest_ft) {
+ esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ (*i)++;
+ } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ esw_setup_slow_path_dest(dest, flow_act, chains, *i);
+ (*i)++;
+ } else if (attr->dest_chain) {
+ err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
+ 1, 0, *i);
+ (*i)++;
+ } else if (esw_is_indir_table(esw, attr)) {
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+ } else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
+ err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
+ } else {
+ *i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
+ }
+
+ return err;
+}
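To make the branch ordering in esw_setup_dests() easier to follow, here is a small standalone sketch of the same precedence: an explicit destination table wins, then the slow path, then a goto-chain, then the indirect table, then chain source-port rewrite, and finally plain vport destinations. The enum and struct names below are invented for illustration and compile as ordinary userspace C.

#include <stdbool.h>
#include <stdio.h>

enum dest_kind {
	DEST_FT,
	DEST_SLOW_PATH,
	DEST_CHAIN,
	DEST_INDIR_TABLE,
	DEST_CHAIN_SRC_REWRITE,
	DEST_VPORTS,
};

struct example_attr {
	bool has_dest_ft;
	bool slow_path;
	bool has_dest_chain;
	bool needs_indir_table;
	bool chain_src_port_rewrite;
};

static enum dest_kind pick_dest(const struct example_attr *a)
{
	if (a->has_dest_ft)
		return DEST_FT;
	if (a->slow_path)
		return DEST_SLOW_PATH;
	if (a->has_dest_chain)
		return DEST_CHAIN;
	if (a->needs_indir_table)
		return DEST_INDIR_TABLE;
	if (a->chain_src_port_rewrite)
		return DEST_CHAIN_SRC_REWRITE;
	return DEST_VPORTS;
}

int main(void)
{
	struct example_attr a = { .has_dest_chain = true, .needs_indir_table = true };

	/* a goto-chain destination takes precedence over the indirect table */
	printf("picked destination kind: %d\n", pick_dest(&a));
	return 0;
}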
+
+static void
+esw_cleanup_dests(struct mlx5_eswitch *esw,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+
+ if (attr->dest_ft) {
+ esw_cleanup_decap_indir(esw, attr);
+ } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ if (attr->dest_chain)
+ esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
+ else if (esw_is_indir_table(esw, attr))
+ esw_cleanup_indir_table(esw, attr);
+ else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+ esw_cleanup_chain_src_port_rewrite(esw, attr);
+ }
+}
+
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
@@ -308,7 +607,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
- int j, i = 0;
+ int i = 0;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
@@ -329,50 +628,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
}
}
+ mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
+
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- struct mlx5_flow_table *ft;
-
- if (attr->dest_ft) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = attr->dest_ft;
- i++;
- } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
- i++;
- } else if (attr->dest_chain) {
- flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- ft = mlx5_chains_get_table(chains, attr->dest_chain,
- 1, 0);
- if (IS_ERR(ft)) {
- rule = ERR_CAST(ft);
- goto err_create_goto_table;
- }
-
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = ft;
- i++;
- } else {
- for (j = esw_attr->split_count; j < esw_attr->out_count; j++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = esw_attr->dests[j].rep->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- dest[i].vport.flags |=
- MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.pkt_reformat =
- esw_attr->dests[j].pkt_reformat;
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat =
- esw_attr->dests[j].pkt_reformat;
- }
- i++;
- }
+ int err;
+
+ err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_create_goto_table;
}
}
@@ -407,15 +671,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
fdb = attr->ft;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
- mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr,
+ esw_attr->in_mdev->priv.eswitch,
+ esw_attr->in_rep->vport);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
-
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
@@ -434,8 +698,7 @@ err_add_rule:
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
- mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
+ esw_cleanup_dests(esw, attr);
err_create_goto_table:
return rule;
}
@@ -453,7 +716,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
- int i;
+ int i, err = 0;
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
@@ -472,22 +735,26 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
- dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = esw_attr->dests[i].rep->vport;
- dest[i].vport.vhca_id =
- MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
- dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat;
+ if (esw_is_indir_table(esw, attr))
+ err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+ else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
+ err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
+ &i);
+ else
+ esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);
+
+ if (err) {
+ rule = ERR_PTR(err);
+ goto err_chain_src_rewrite;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb;
i++;
- mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, attr,
+ esw_attr->in_mdev->priv.eswitch,
+ esw_attr->in_rep->vport);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -495,13 +762,16 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);
- if (IS_ERR(rule))
- goto add_err;
+ if (IS_ERR(rule)) {
+ i = esw_attr->split_count;
+ goto err_chain_src_rewrite;
+ }
atomic64_inc(&esw->offloads.num_flows);
return rule;
-add_err:
+err_chain_src_rewrite:
+ esw_put_dest_tables_loop(esw, attr, 0, i);
esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
@@ -542,13 +812,13 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (fwd_rule) {
esw_vport_tbl_put(esw, &fwd_attr);
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
+ esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
} else {
if (split)
esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
- if (attr->dest_chain)
- mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
+ esw_cleanup_dests(esw, attr);
}
}
@@ -810,6 +1080,81 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
+static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
+ int i = 0, num_vfs = esw->esw_funcs.num_vfs, vport_num;
+
+ if (!num_vfs || !flows)
+ return;
+
+ mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs)
+ mlx5_del_flow_rules(flows[i++]);
+
+ kvfree(flows);
+}
+
+static int
+mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
+{
+ int num_vfs, vport_num, rule_idx = 0, err = 0;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_handle **flows;
+ struct mlx5_flow_spec *spec;
+
+ num_vfs = esw->esw_funcs.num_vfs;
+ flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
+ if (!flows)
+ return -ENOMEM;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
+ ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ mlx5_esw_for_each_vf_vport_num(esw, vport_num, num_vfs) {
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
+ dest.vport.num = vport_num;
+
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
+ rule_idx, PTR_ERR(flow_rule));
+ goto rule_err;
+ }
+ flows[rule_idx++] = flow_rule;
+ }
+
+ esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
+ kvfree(spec);
+ return 0;
+
+rule_err:
+ while (--rule_idx >= 0)
+ mlx5_del_flow_rules(flows[rule_idx]);
+ kvfree(spec);
+alloc_err:
+ kvfree(flows);
+ return err;
+}
+
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
@@ -1292,11 +1637,11 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_table_attr ft_attr = {};
+ int num_vfs, table_size, ix, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
u32 flags = 0, *flow_group_in;
- int table_size, ix, err = 0;
struct mlx5_flow_group *g;
void *match_criteria;
u8 *dmac;
@@ -1322,7 +1667,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
- MLX5_ESW_MISS_FLOWS + esw->total_vports;
+ MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed if the FW allows that.
@@ -1370,6 +1715,38 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.send_to_vport_grp = g;
+ /* meta send to vport */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS_2);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ num_vfs = esw->esw_funcs.num_vfs;
+ if (num_vfs) {
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + num_vfs - 1);
+ ix += num_vfs;
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
+ err);
+ goto send_vport_meta_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_meta_grp = g;
+
+ err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
+ if (err)
+ goto meta_rule_err;
+ }
+
if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
/* create peer esw miss group */
memset(flow_group_in, 0, inlen);
@@ -1437,6 +1814,11 @@ miss_err:
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
+meta_rule_err:
+ if (esw->fdb_table.offloads.send_to_vport_meta_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
+send_vport_meta_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
esw_chains_destroy(esw, esw_chains(esw));
@@ -1458,7 +1840,10 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
+ mlx5_eswitch_del_send_to_vport_meta_rules(esw);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ if (esw->fdb_table.offloads.send_to_vport_meta_grp)
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -1800,11 +2185,22 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
+static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ struct mlx5_eswitch_rep *rep;
+ int i;
+
+ mlx5_esw_for_each_sf_rep(esw, i, rep)
+ __esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
int i;
+ __unload_reps_sf_vport(esw, rep_type);
+
mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, esw->esw_funcs.num_vfs)
__esw_offloads_unload_rep(esw, rep, rep_type);
@@ -1822,7 +2218,7 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -1846,7 +2242,7 @@ err_reps:
return err;
}
-static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
@@ -2171,12 +2567,20 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
+ struct mlx5_esw_indir_table *indir;
int err;
memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.vports.lock);
hash_init(esw->fdb_table.offloads.vports.table);
+ indir = mlx5_esw_indir_table_init();
+ if (IS_ERR(indir)) {
+ err = PTR_ERR(indir);
+ goto create_indir_err;
+ }
+ esw->fdb_table.offloads.indir = indir;
+
err = esw_create_uplink_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
@@ -2208,6 +2612,8 @@ create_restore_err:
create_offloads_err:
esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
+ mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
+create_indir_err:
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
return err;
}
@@ -2219,6 +2625,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
esw_destroy_uplink_offloads_acl_tables(esw);
+ mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
@@ -2824,3 +3231,126 @@ u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
+
+int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
+ u16 vport_num, u32 sfnum)
+{
+ int err;
+
+ err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
+ if (err)
+ return err;
+
+ err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, sfnum);
+ if (err)
+ goto devlink_err;
+
+ err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ if (err)
+ goto rep_err;
+ return 0;
+
+rep_err:
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+devlink_err:
+ mlx5_esw_vport_disable(esw, vport_num);
+ return err;
+}
+
+void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
+ mlx5_esw_vport_disable(esw, vport_num);
+}
+
+static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
+{
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *query_ctx;
+ void *hca_caps;
+ int err;
+
+ *vhca_id = 0;
+ if (mlx5_esw_is_manager_vport(esw, vport_num) ||
+ !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
+ return -EPERM;
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
+ if (err)
+ goto out_free;
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
+
+out_free:
+ kfree(query_ctx);
+ return err;
+}
+
+int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 *old_entry, *vhca_map_entry, vhca_id;
+ int err;
+
+ err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ if (err) {
+ esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
+ vport_num, err);
+ return err;
+ }
+
+ vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
+ if (!vhca_map_entry)
+ return -ENOMEM;
+
+ *vhca_map_entry = vport_num;
+ old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
+ if (xa_is_err(old_entry)) {
+ kfree(vhca_map_entry);
+ return xa_err(old_entry);
+ }
+ kfree(old_entry);
+ return 0;
+}
+
+void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ u16 *vhca_map_entry, vhca_id;
+ int err;
+
+ err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
+ if (err)
+ esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
+ vport_num, err);
+
+ vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
+ kfree(vhca_map_entry);
+}
+
+int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
+{
+ u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
+
+ if (!res)
+ return -ENOENT;
+
+ *vport_num = *res;
+ return 0;
+}
+
+u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
+ u16 vport_num)
+{
+ struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+
+ if (WARN_ON_ONCE(IS_ERR(vport)))
+ return 0;
+
+ return vport->metadata;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
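The two exported metadata helpers differ only in placement: the value returned for "set" is the raw per-vport metadata, while the value used for matching (see mlx5_eswitch_get_vport_metadata_for_match above) is that same value shifted into the upper source-port bits of metadata register C0. The standalone sketch below illustrates the relationship; the 16-bit width is an assumption for illustration, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SOURCE_PORT_METADATA_BITS 16	/* assumed width */

static uint32_t metadata_for_match(uint32_t metadata_for_set)
{
	/* place the per-vport value in the high bits of reg_c_0 */
	return metadata_for_set << (32 - EXAMPLE_SOURCE_PORT_METADATA_BITS);
}

int main(void)
{
	uint32_t set_val = 0x1234;

	printf("for_set=0x%x -> for_match=0x%08x\n",
	       set_val, metadata_for_match(set_val));
	return 0;
}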
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 3ce17c3d7a00..d713ae24d6b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -23,7 +23,7 @@ static int temp_warn(struct notifier_block *, unsigned long, void *);
static int port_module(struct notifier_block *, unsigned long, void *);
static int pcie_core(struct notifier_block *, unsigned long, void *);
-/* handler which forwards the event to events->nh, driver notifiers */
+/* handler which forwards the event to events->fw_nh, driver notifiers */
static int forward_event(struct notifier_block *, unsigned long, void *);
static struct mlx5_nb events_nbs_ref[] = {
@@ -55,12 +55,14 @@ struct mlx5_events {
struct mlx5_core_dev *dev;
struct workqueue_struct *wq;
struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
- /* driver notifier chain */
- struct atomic_notifier_head nh;
+ /* driver notifier chain for fw events */
+ struct atomic_notifier_head fw_nh;
/* port module events stats */
struct mlx5_pme_stats pme_stats;
/*pcie_core*/
struct work_struct pcie_core_work;
+ /* driver notifier chain for sw events */
+ struct blocking_notifier_head sw_nh;
};
static const char *eqe_type_str(u8 type)
@@ -110,6 +112,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_CMD";
case MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED:
return "MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED";
+ case MLX5_EVENT_TYPE_VHCA_STATE_CHANGE:
+ return "MLX5_EVENT_TYPE_VHCA_STATE_CHANGE";
case MLX5_EVENT_TYPE_PAGE_REQUEST:
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
@@ -331,7 +335,7 @@ static int forward_event(struct notifier_block *nb, unsigned long event, void *d
mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
eqe_type_str(eqe->type), eqe->sub_type);
- atomic_notifier_call_chain(&events->nh, event, data);
+ atomic_notifier_call_chain(&events->fw_nh, event, data);
return NOTIFY_OK;
}
@@ -342,7 +346,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
if (!events)
return -ENOMEM;
- ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
+ ATOMIC_INIT_NOTIFIER_HEAD(&events->fw_nh);
events->dev = dev;
dev->priv.events = events;
events->wq = create_singlethread_workqueue("mlx5_events");
@@ -351,6 +355,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
return -ENOMEM;
}
INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
+ BLOCKING_INIT_NOTIFIER_HEAD(&events->sw_nh);
return 0;
}
@@ -383,11 +388,14 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
flush_workqueue(events->wq);
}
+/* This API is used only for processing and forwarding firmware
+ * events to mlx5 consumers.
+ */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
struct mlx5_events *events = dev->priv.events;
- return atomic_notifier_chain_register(&events->nh, nb);
+ return atomic_notifier_chain_register(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_register);
@@ -395,11 +403,41 @@ int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *n
{
struct mlx5_events *events = dev->priv.events;
- return atomic_notifier_chain_unregister(&events->nh, nb);
+ return atomic_notifier_chain_unregister(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_unregister);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
{
- return atomic_notifier_call_chain(&events->nh, event, data);
+ return atomic_notifier_call_chain(&events->fw_nh, event, data);
+}
+
+/* This API is used only for processing and forwarding driver-specific
+ * events to mlx5 consumers.
+ */
+int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_register(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_chain_unregister(&events->sw_nh, nb);
+}
+
+int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
+ void *data)
+{
+ struct mlx5_events *events = dev->priv.events;
+
+ return blocking_notifier_call_chain(&events->sw_nh, event, data);
+}
+
+void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work)
+{
+ queue_work(dev->priv.events->wq, work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index cc67366495b0..22bee4990232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -124,7 +124,7 @@ struct mlx5_fpga_ipsec {
struct ida halloc;
};
-static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
index db88eb4c49e3..8931b5584477 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.h
@@ -43,6 +43,7 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev);
const struct mlx5_flow_cmds *
mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type);
void mlx5_fpga_ipsec_build_fs_cmds(void);
+bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev);
#else
static inline
const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
@@ -55,6 +56,7 @@ mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
}
static inline void mlx5_fpga_ipsec_build_fs_cmds(void) {};
+static inline bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev) { return false; }
#endif /* CONFIG_MLX5_FPGA_IPSEC */
#endif /* __MLX5_FPGA_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index ee4d86c1f436..66ad599bd488 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -105,8 +105,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 6
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -572,7 +572,7 @@ static void del_hw_fte(struct fs_node *node)
mlx5_core_warn(dev,
"flow steering can't delete fte in index %d of flow group id %d\n",
fte->index, fg->id);
- node->active = 0;
+ node->active = false;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 54523bed16cd..0c32c485eb58 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -190,6 +190,16 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
return true;
}
+static void enter_error_state(struct mlx5_core_dev *dev, bool force)
+{
+ if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ mlx5_cmd_flush(dev);
+ }
+
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+}
+
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
bool err_detected = false;
@@ -208,12 +218,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
goto unlock;
}
- if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
- dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
- mlx5_cmd_flush(dev);
- }
-
- mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1);
+ enter_error_state(dev, force);
unlock:
mutex_unlock(&dev->intf_state_mutex);
}
@@ -613,7 +618,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
- mlx5_enter_error_state(dev, false);
+ enter_error_state(dev, false);
if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
if (mlx5_health_try_recover(dev))
mlx5_core_err(dev, "health recovery failed\n");
@@ -707,8 +712,9 @@ static void poll_health(struct timer_list *t)
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
dev->priv.health.fatal_error = fatal_error;
print_health_info(dev);
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mlx5_trigger_health_work(dev);
- goto out;
+ return;
}
count = ioread32be(health->health_counter);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 97b5fcb1f406..1eeca45cfcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -72,23 +72,15 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-int mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
- int err;
-
- err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
- if (err)
- return err;
+ netif_carrier_off(netdev);
mlx5e_set_netdev_mtu_boundaries(priv);
netdev->mtu = netdev->max_mtu;
- mlx5e_build_nic_params(priv, NULL, &priv->rss_params, &priv->channels.params,
- netdev->mtu);
+ mlx5e_build_nic_params(priv, NULL, netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -112,7 +104,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
void mlx5i_cleanup(struct mlx5e_priv *priv)
{
- mlx5e_netdev_cleanup(priv->netdev, priv);
+ mlx5e_priv_cleanup(priv);
}
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
@@ -753,7 +745,14 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u8 port_num,
goto destroy_ht;
}
- prof->init(mdev, netdev, prof, ipriv);
+ err = mlx5e_priv_init(epriv, netdev, mdev);
+ if (err)
+ goto destroy_mdev_resources;
+
+ epriv->profile = prof;
+ epriv->ppriv = ipriv;
+
+ prof->init(mdev, netdev);
err = mlx5e_attach_netdev(epriv);
if (err)
@@ -777,6 +776,7 @@ detach:
prof->cleanup(epriv);
if (ipriv->sub_interface)
return err;
+destroy_mdev_resources:
mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
mlx5i_pkey_qpn_ht_cleanup(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index b79dc1e28c41..99d46fda9f82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -87,10 +87,7 @@ void mlx5i_dev_cleanup(struct net_device *dev);
int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/* Parent profile functions */
-int mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv);
+int mlx5i_init(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5i_cleanup(struct mlx5e_priv *priv);
int mlx5i_update_nic_rx(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 7163d9f6c4a6..3d0a18a0bed4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -276,14 +276,12 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
/* Called directly after IPoIB netdevice was created to initialize SW structs */
static int mlx5i_pkey_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+ struct net_device *netdev)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
int err;
- err = mlx5i_init(mdev, netdev, profile, ppriv);
+ err = mlx5i_init(mdev, netdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index c70c1f0ca0c1..b0e129d0f6d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -67,39 +67,68 @@ enum {
MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
};
-static u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev,
- struct ptp_system_timestamp *sts)
+static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
+{
+ return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
+}
+
+static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
+}
+
+static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
+{
+ u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!MLX5_CAP_MCAM_REG(dev, mtutc))
+ return -EOPNOTSUPP;
+
+ return mlx5_core_access_reg(dev, mtutc, size, out, sizeof(out),
+ MLX5_REG_MTUTC, 0, 1);
+}
+
+static u64 mlx5_read_time(struct mlx5_core_dev *dev,
+ struct ptp_system_timestamp *sts,
+ bool real_time)
{
u32 timer_h, timer_h1, timer_l;
- timer_h = ioread32be(&dev->iseg->internal_timer_h);
+ timer_h = ioread32be(real_time ? &dev->iseg->real_time_h :
+ &dev->iseg->internal_timer_h);
ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
+ &dev->iseg->internal_timer_l);
ptp_read_system_postts(sts);
- timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
+ timer_h1 = ioread32be(real_time ? &dev->iseg->real_time_h :
+ &dev->iseg->internal_timer_h);
if (timer_h != timer_h1) {
/* wrap around */
ptp_read_system_prets(sts);
- timer_l = ioread32be(&dev->iseg->internal_timer_l);
+ timer_l = ioread32be(real_time ? &dev->iseg->real_time_l :
+ &dev->iseg->internal_timer_l);
ptp_read_system_postts(sts);
}
- return (u64)timer_l | (u64)timer_h1 << 32;
+ return real_time ? REAL_TIME_TO_NS(timer_h1, timer_l) :
+ (u64)timer_l | (u64)timer_h1 << 32;
}
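A standalone sketch of the two read paths above: in real-time mode the high register is treated as seconds and the low register as nanoseconds (this is the assumed behaviour of REAL_TIME_TO_NS(), which is defined elsewhere in the driver), while in free-running mode the two halves are simply concatenated into a 64-bit cycle count.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t example_real_time_to_ns(uint32_t sec, uint32_t nsec)
{
	return (uint64_t)sec * NSEC_PER_SEC + nsec;	/* assumed semantics */
}

static uint64_t example_free_running_cycles(uint32_t hi, uint32_t lo)
{
	return (uint64_t)lo | (uint64_t)hi << 32;
}

int main(void)
{
	printf("real time: %llu ns\n",
	       (unsigned long long)example_real_time_to_ns(5, 250));
	printf("free running: %llu cycles\n",
	       (unsigned long long)example_free_running_cycles(1, 2));
	return 0;
}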
static u64 read_internal_timer(const struct cyclecounter *cc)
{
- struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
+ struct mlx5_timer *timer = container_of(cc, struct mlx5_timer, cycles);
+ struct mlx5_clock *clock = container_of(timer, struct mlx5_clock, timer);
struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
clock);
- return mlx5_read_internal_timer(mdev, NULL) & cc->mask;
+ return mlx5_read_time(mdev, NULL, false) & cc->mask;
}
static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
{
struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_timer *timer;
u32 sign;
if (!clock_info)
@@ -109,10 +138,11 @@ static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev)
smp_store_mb(clock_info->sign,
sign | MLX5_IB_CLOCK_INFO_KERNEL_UPDATING);
- clock_info->cycles = clock->tc.cycle_last;
- clock_info->mult = clock->cycles.mult;
- clock_info->nsec = clock->tc.nsec;
- clock_info->frac = clock->tc.frac;
+ timer = &clock->timer;
+ clock_info->cycles = timer->tc.cycle_last;
+ clock_info->mult = timer->cycles.mult;
+ clock_info->nsec = timer->tc.nsec;
+ clock_info->frac = timer->tc.frac;
smp_store_release(&clock_info->sign,
sign + MLX5_IB_CLOCK_INFO_KERNEL_UPDATING * 2);
@@ -151,92 +181,184 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5_core_dev *mdev;
+ struct mlx5_timer *timer;
struct mlx5_clock *clock;
unsigned long flags;
- clock = container_of(dwork, struct mlx5_clock, overflow_work);
+ timer = container_of(dwork, struct mlx5_timer, overflow_work);
+ clock = container_of(timer, struct mlx5_clock, timer);
mdev = container_of(clock, struct mlx5_core_dev, clock);
+
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_read(&clock->tc);
+ timecounter_read(&timer->tc);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
- schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
+ schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
+}
+
+static int mlx5_ptp_settime_real_time(struct mlx5_core_dev *mdev,
+ const struct timespec64 *ts)
+{
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!mlx5_modify_mtutc_allowed(mdev))
+ return 0;
+
+ if (ts->tv_sec < 0 || ts->tv_sec > U32_MAX ||
+ ts->tv_nsec < 0 || ts->tv_nsec > NSEC_PER_SEC)
+ return -EINVAL;
+
+ MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_SET_TIME_IMMEDIATE);
+ MLX5_SET(mtutc_reg, in, utc_sec, ts->tv_sec);
+ MLX5_SET(mtutc_reg, in, utc_nsec, ts->tv_nsec);
+
+ return mlx5_set_mtutc(mdev, in, sizeof(in));
}
static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
- u64 ns = timespec64_to_ns(ts);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ err = mlx5_ptp_settime_real_time(mdev, ts);
+ if (err)
+ return err;
+
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_init(&clock->tc, &clock->cycles, ns);
+ timecounter_init(&timer->tc, &timer->cycles, timespec64_to_ns(ts));
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
+static
+struct timespec64 mlx5_ptp_gettimex_real_time(struct mlx5_core_dev *mdev,
+ struct ptp_system_timestamp *sts)
+{
+ struct timespec64 ts;
+ u64 time;
+
+ time = mlx5_read_time(mdev, sts, true);
+ ts = ns_to_timespec64(time);
+ return ts;
+}
+
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ if (mlx5_real_time_mode(mdev)) {
+ *ts = mlx5_ptp_gettimex_real_time(mdev, sts);
+ goto out;
+ }
+
write_seqlock_irqsave(&clock->lock, flags);
- cycles = mlx5_read_internal_timer(mdev, sts);
- ns = timecounter_cyc2time(&clock->tc, cycles);
+ cycles = mlx5_read_time(mdev, sts, false);
+ ns = timecounter_cyc2time(&timer->tc, cycles);
write_sequnlock_irqrestore(&clock->lock, flags);
-
*ts = ns_to_timespec64(ns);
-
+out:
return 0;
}
+static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
+{
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!mlx5_modify_mtutc_allowed(mdev))
+ return 0;
+
+ /* HW time adjustment range is s16. If out of range, settime instead */
+ if (delta < S16_MIN || delta > S16_MAX) {
+ struct timespec64 ts;
+ s64 ns;
+
+ ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
+ ns = timespec64_to_ns(&ts) + delta;
+ ts = ns_to_timespec64(ns);
+ return mlx5_ptp_settime_real_time(mdev, &ts);
+ }
+
+ MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_TIME);
+ MLX5_SET(mtutc_reg, in, time_adjustment, delta);
+
+ return mlx5_set_mtutc(mdev, in, sizeof(in));
+}
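The fallback policy above can be summarized with a tiny standalone sketch: deltas that fit the signed 16-bit hardware adjustment field are programmed directly, and anything larger is applied by reading the current time, adding the delta, and setting the clock absolutely.

#include <stdint.h>
#include <stdio.h>

static const char *adjtime_path(int64_t delta_ns)
{
	if (delta_ns < INT16_MIN || delta_ns > INT16_MAX)
		return "gettime + delta, then absolute settime";
	return "signed 16-bit hardware adjustment";
}

int main(void)
{
	printf("%+lld ns -> %s\n", (long long)100, adjtime_path(100));
	printf("%+lld ns -> %s\n", (long long)200000, adjtime_path(200000));
	return 0;
}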
+
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
+ int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ err = mlx5_ptp_adjtime_real_time(mdev, delta);
+ if (err)
+ return err;
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_adjtime(&clock->tc, delta);
+ timecounter_adjtime(&timer->tc, delta);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
}
+static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
+{
+ u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
+
+ if (!mlx5_modify_mtutc_allowed(mdev))
+ return 0;
+
+ MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
+
+ return mlx5_set_mtutc(mdev, in, sizeof(in));
+}
+
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_timer *timer = &clock->timer;
struct mlx5_core_dev *mdev;
unsigned long flags;
int neg_adj = 0;
u32 diff;
u64 adj;
+ int err;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+ err = mlx5_ptp_adjfreq_real_time(mdev, delta);
+ if (err)
+ return err;
if (delta < 0) {
neg_adj = 1;
delta = -delta;
}
- adj = clock->nominal_c_mult;
+ adj = timer->nominal_c_mult;
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
- mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
- timecounter_read(&clock->tc);
- clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
- clock->nominal_c_mult + diff;
+ timecounter_read(&timer->tc);
+ timer->cycles.mult = neg_adj ? timer->nominal_c_mult - diff :
+ timer->nominal_c_mult + diff;
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
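For the free-running timecounter, the frequency trim above scales the nominal cyclecounter multiplier by the requested parts-per-billion. A standalone sketch of that arithmetic, with an arbitrary illustrative nominal multiplier rather than a device constant:

#include <stdint.h>
#include <stdio.h>

static uint32_t adjust_mult(uint32_t nominal_c_mult, int32_t delta_ppb)
{
	int64_t d = delta_ppb;
	int neg = d < 0;
	uint64_t adj = (uint64_t)nominal_c_mult * (uint64_t)(neg ? -d : d);
	uint32_t diff = (uint32_t)(adj / 1000000000ULL);

	return neg ? nominal_c_mult - diff : nominal_c_mult + diff;
}

int main(void)
{
	uint32_t nominal = 67108864;	/* illustrative value only */

	printf("+50000 ppb -> mult %u\n", adjust_mult(nominal, 50000));
	printf("-50000 ppb -> mult %u\n", adjust_mult(nominal, -50000));
	return 0;
}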
@@ -305,6 +427,45 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
MLX5_EVENT_MODE_REPETETIVE & on);
}
+static u64 find_target_cycles(struct mlx5_core_dev *mdev, s64 target_ns)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u64 cycles_now, cycles_delta;
+ u64 nsec_now, nsec_delta;
+ struct mlx5_timer *timer;
+ unsigned long flags;
+
+ timer = &clock->timer;
+
+ cycles_now = mlx5_read_time(mdev, NULL, false);
+ write_seqlock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&timer->tc, cycles_now);
+ nsec_delta = target_ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << timer->cycles.shift,
+ timer->cycles.mult);
+ write_sequnlock_irqrestore(&clock->lock, flags);
+
+ return cycles_now + cycles_delta;
+}
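find_target_cycles() converts an absolute target time in nanoseconds into an absolute cycle count by inverting the timecounter relation: the remaining nanosecond delta is scaled up by the shift, divided by the multiplier, and added to the current cycle count. A standalone sketch with illustrative mult/shift values (not device constants):

#include <stdint.h>
#include <stdio.h>

static uint64_t ns_delta_to_cycles(uint64_t nsec_delta, uint32_t mult,
				   uint32_t shift)
{
	return (nsec_delta << shift) / mult;
}

int main(void)
{
	uint32_t mult = 67108864, shift = 26;	/* illustrative: 1 cycle == 1 ns */
	uint64_t cycles_now = 1000000, target_delta_ns = 1000000000ULL;

	printf("target cycles: %llu\n",
	       (unsigned long long)(cycles_now +
				    ns_delta_to_cycles(target_delta_ns, mult, shift)));
	return 0;
}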
+
+static u64 perout_conf_internal_timer(struct mlx5_core_dev *mdev,
+ s64 sec, u32 nsec)
+{
+ struct timespec64 ts;
+ s64 target_ns;
+
+ ts.tv_sec = sec;
+ ts.tv_nsec = nsec;
+ target_ns = timespec64_to_ns(&ts);
+
+ return find_target_cycles(mdev, target_ns);
+}
+
+static u64 perout_conf_real_time(s64 sec, u32 nsec)
+{
+ return (u64)nsec | (u64)sec << 32;
+}
+
static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq,
int on)
@@ -314,11 +475,9 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
struct mlx5_core_dev *mdev =
container_of(clock, struct mlx5_core_dev, clock);
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u64 nsec_now, nsec_delta, time_stamp = 0;
- u64 cycles_now, cycles_delta;
struct timespec64 ts;
- unsigned long flags;
u32 field_select = 0;
+ u64 time_stamp = 0;
u8 pin_mode = 0;
u8 pattern = 0;
int pin = -1;
@@ -335,12 +494,16 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
-
+ field_select = MLX5_MTPPS_FS_ENABLE;
if (on) {
+ bool rt_mode = mlx5_real_time_mode(mdev);
+ u32 nsec;
+ s64 sec;
+
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -350,23 +513,18 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if ((ns >> 1) != 500000000LL)
return -EINVAL;
- ts.tv_sec = rq->perout.start.sec;
- ts.tv_nsec = rq->perout.start.nsec;
- ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(mdev, NULL);
- write_seqlock_irqsave(&clock->lock, flags);
- nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
- clock->cycles.mult);
- write_sequnlock_irqrestore(&clock->lock, flags);
- time_stamp = cycles_now + cycles_delta;
- field_select = MLX5_MTPPS_FS_PIN_MODE |
- MLX5_MTPPS_FS_PATTERN |
- MLX5_MTPPS_FS_ENABLE |
- MLX5_MTPPS_FS_TIME_STAMP;
- } else {
- field_select = MLX5_MTPPS_FS_ENABLE;
+ nsec = rq->perout.start.nsec;
+ sec = rq->perout.start.sec;
+
+ if (rt_mode && sec > U32_MAX)
+ return -EINVAL;
+
+ time_stamp = rt_mode ? perout_conf_real_time(sec, nsec) :
+ perout_conf_internal_timer(mdev, sec, nsec);
+
+ field_select |= MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_TIME_STAMP;
}
MLX5_SET(mtpps_reg, in, pin, pin);
@@ -537,25 +695,50 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
}
+static void ts_next_sec(struct timespec64 *ts)
+{
+ ts->tv_sec += 1;
+ ts->tv_nsec = 0;
+}
+
+static u64 perout_conf_next_event_timer(struct mlx5_core_dev *mdev,
+ struct mlx5_clock *clock)
+{
+ bool rt_mode = mlx5_real_time_mode(mdev);
+ struct timespec64 ts;
+ s64 target_ns;
+
+ if (rt_mode)
+ ts = mlx5_ptp_gettimex_real_time(mdev, NULL);
+ else
+ mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
+
+ ts_next_sec(&ts);
+ target_ns = timespec64_to_ns(&ts);
+
+ return rt_mode ? perout_conf_real_time(ts.tv_sec, ts.tv_nsec) :
+ find_target_cycles(mdev, target_ns);
+}
+
static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
struct ptp_clock_event ptp_event;
- u64 cycles_now, cycles_delta;
- u64 nsec_now, nsec_delta, ns;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
struct mlx5_core_dev *mdev;
- struct timespec64 ts;
unsigned long flags;
+ u64 ns;
mdev = container_of(clock, struct mlx5_core_dev, clock);
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp =
+ ptp_event.timestamp = mlx5_real_time_mode(mdev) ?
+ mlx5_real_time_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp)) :
mlx5_timecounter_cyc2time(clock,
be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
@@ -569,17 +752,9 @@ static int mlx5_pps_event(struct notifier_block *nb,
ptp_clock_event(clock->ptp, &ptp_event);
break;
case PTP_PF_PEROUT:
- mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL);
- cycles_now = mlx5_read_internal_timer(mdev, NULL);
- ts.tv_sec += 1;
- ts.tv_nsec = 0;
- ns = timespec64_to_ns(&ts);
+ ns = perout_conf_next_event_timer(mdev, clock);
write_seqlock_irqsave(&clock->lock, flags);
- nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
- clock->cycles.mult);
- clock->pps_info.start[pin] = cycles_now + cycles_delta;
+ clock->pps_info.start[pin] = ns;
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_work(&clock->pps_info.out_work);
break;
@@ -591,29 +766,32 @@ static int mlx5_pps_event(struct notifier_block *nb,
return NOTIFY_OK;
}
-void mlx5_init_clock(struct mlx5_core_dev *mdev)
+static void mlx5_timecounter_init(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
- u64 overflow_cycles;
- u64 ns;
- u64 frac = 0;
+ struct mlx5_timer *timer = &clock->timer;
u32 dev_freq;
dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
- if (!dev_freq) {
- mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
- seqlock_init(&clock->lock);
- clock->cycles.read = read_internal_timer;
- clock->cycles.shift = MLX5_CYCLES_SHIFT;
- clock->cycles.mult = clocksource_khz2mult(dev_freq,
- clock->cycles.shift);
- clock->nominal_c_mult = clock->cycles.mult;
- clock->cycles.mask = CLOCKSOURCE_MASK(41);
-
- timecounter_init(&clock->tc, &clock->cycles,
+ timer->cycles.read = read_internal_timer;
+ timer->cycles.shift = MLX5_CYCLES_SHIFT;
+ timer->cycles.mult = clocksource_khz2mult(dev_freq,
+ timer->cycles.shift);
+ timer->nominal_c_mult = timer->cycles.mult;
+ timer->cycles.mask = CLOCKSOURCE_MASK(41);
+
+ timecounter_init(&timer->tc, &timer->cycles,
ktime_to_ns(ktime_get_real()));
+}
+
+static void mlx5_init_overflow_period(struct mlx5_clock *clock)
+{
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+ struct mlx5_ib_clock_info *clock_info = mdev->clock_info;
+ struct mlx5_timer *timer = &clock->timer;
+ u64 overflow_cycles;
+ u64 frac = 0;
+ u64 ns;
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least twice every wrap around.
@@ -622,32 +800,77 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
* multiplied by clock multiplier where the result doesn't exceed
* 64bits.
*/
- overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
- overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
+ overflow_cycles = div64_u64(~0ULL >> 1, timer->cycles.mult);
+ overflow_cycles = min(overflow_cycles, div_u64(timer->cycles.mask, 3));
- ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
+ ns = cyclecounter_cyc2ns(&timer->cycles, overflow_cycles,
frac, &frac);
do_div(ns, NSEC_PER_SEC / HZ);
- clock->overflow_period = ns;
+ timer->overflow_period = ns;
- mdev->clock_info =
- (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
- if (mdev->clock_info) {
- mdev->clock_info->nsec = clock->tc.nsec;
- mdev->clock_info->cycles = clock->tc.cycle_last;
- mdev->clock_info->mask = clock->cycles.mask;
- mdev->clock_info->mult = clock->nominal_c_mult;
- mdev->clock_info->shift = clock->cycles.shift;
- mdev->clock_info->frac = clock->tc.frac;
- mdev->clock_info->overflow_period = clock->overflow_period;
+ INIT_DELAYED_WORK(&timer->overflow_work, mlx5_timestamp_overflow);
+ if (timer->overflow_period)
+ schedule_delayed_work(&timer->overflow_work, 0);
+ else
+ mlx5_core_warn(mdev,
+ "invalid overflow period, overflow_work is not scheduled\n");
+
+ if (clock_info)
+ clock_info->overflow_period = timer->overflow_period;
+}
+
+static void mlx5_init_clock_info(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ struct mlx5_ib_clock_info *info;
+ struct mlx5_timer *timer;
+
+ mdev->clock_info = (struct mlx5_ib_clock_info *)get_zeroed_page(GFP_KERNEL);
+ if (!mdev->clock_info) {
+ mlx5_core_warn(mdev, "Failed to allocate IB clock info page\n");
+ return;
+ }
+
+ info = mdev->clock_info;
+ timer = &clock->timer;
+
+ info->nsec = timer->tc.nsec;
+ info->cycles = timer->tc.cycle_last;
+ info->mask = timer->cycles.mask;
+ info->mult = timer->nominal_c_mult;
+ info->shift = timer->cycles.shift;
+ info->frac = timer->tc.frac;
+}
+
+static void mlx5_init_timer_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ mlx5_timecounter_init(mdev);
+ mlx5_init_clock_info(mdev);
+ mlx5_init_overflow_period(clock);
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ if (mlx5_real_time_mode(mdev)) {
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ mlx5_ptp_settime(&clock->ptp_info, &ts);
+ }
+}
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) {
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return;
}
+ seqlock_init(&clock->lock);
+ mlx5_init_timer_clock(mdev);
INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
- INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
- if (clock->overflow_period)
- schedule_delayed_work(&clock->overflow_work, 0);
- else
- mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
/* Configure the PHC */
clock->ptp_info = mlx5_ptp_clock_info;
@@ -684,7 +907,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
}
cancel_work_sync(&clock->pps_info.out_work);
- cancel_delayed_work_sync(&clock->overflow_work);
+ cancel_delayed_work_sync(&clock->timer.overflow_work);
if (mdev->clock_info) {
free_page((unsigned long)mdev->clock_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
index 31600924bdc3..a12c7da618a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -33,6 +33,24 @@
#ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__
+static inline bool mlx5_is_real_time_rq(struct mlx5_core_dev *mdev)
+{
+ u8 rq_ts_format_cap = MLX5_CAP_GEN(mdev, rq_ts_format);
+
+ return (rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+ rq_ts_format_cap == MLX5_RQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME);
+}
+
+static inline bool mlx5_is_real_time_sq(struct mlx5_core_dev *mdev)
+{
+ u8 sq_ts_format_cap = MLX5_CAP_GEN(mdev, sq_ts_format);
+
+ return (sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
+ sq_ts_format_cap == MLX5_SQ_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME);
+}
+
+typedef ktime_t (*cqe_ts_to_ns)(struct mlx5_clock *, u64);
+
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
@@ -45,17 +63,27 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp)
{
+ struct mlx5_timer *timer = &clock->timer;
unsigned int seq;
u64 nsec;
do {
seq = read_seqbegin(&clock->lock);
- nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ nsec = timecounter_cyc2time(&timer->tc, timestamp);
} while (read_seqretry(&clock->lock, seq));
return ns_to_ktime(nsec);
}
+#define REAL_TIME_TO_NS(hi, low) (((u64)(hi)) * NSEC_PER_SEC + ((u64)(low)))
+
+static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ u64 time = REAL_TIME_TO_NS(timestamp >> 32, timestamp & 0xFFFFFFFF);
+
+ return ns_to_ktime(time);
+}
#else
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
@@ -69,6 +97,12 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
{
return 0;
}
+
+static inline ktime_t mlx5_real_time_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ return 0;
+}
#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 947f346bdc2d..381325b4a863 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -141,9 +141,6 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
@@ -541,13 +538,13 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_handle *miss_rule;
struct mlx5_flow_group *miss_group;
struct mlx5_flow_table *next_ft;
struct mlx5_flow_table *ft;
- struct prio *prio_s = NULL;
struct fs_chain *chain_s;
struct list_head *pos;
+ struct prio *prio_s;
u32 *flow_group_in;
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ca6f2fc39ea0..2f2c352f301e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -73,6 +73,9 @@
#include "ecpf.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
+#include "sf/vhca_event.h"
+#include "sf/dev/dev.h"
+#include "sf/sf.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -82,7 +85,6 @@ unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
-#define MLX5_DEFAULT_PROF 2
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
@@ -567,6 +569,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);
+ mlx5_vhca_state_cap_handle(dev, set_hca_cap);
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
@@ -884,6 +888,24 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eswitch_cleanup;
}
+ err = mlx5_vhca_event_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
+ goto err_fpga_cleanup;
+ }
+
+ err = mlx5_sf_hw_table_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
+ goto err_sf_hw_table_cleanup;
+ }
+
+ err = mlx5_sf_table_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to init SF table %d\n", err);
+ goto err_sf_table_cleanup;
+ }
+
dev->dm = mlx5_dm_create(dev);
if (IS_ERR(dev->dm))
mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
@@ -894,6 +916,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
return 0;
+err_sf_table_cleanup:
+ mlx5_sf_hw_table_cleanup(dev);
+err_sf_hw_table_cleanup:
+ mlx5_vhca_event_cleanup(dev);
+err_fpga_cleanup:
+ mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
@@ -925,6 +953,9 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_hv_vhca_destroy(dev->hv_vhca);
mlx5_fw_tracer_destroy(dev->tracer);
mlx5_dm_cleanup(dev);
+ mlx5_sf_table_cleanup(dev);
+ mlx5_sf_hw_table_cleanup(dev);
+ mlx5_vhca_event_cleanup(dev);
mlx5_fpga_cleanup(dev);
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_sriov_cleanup(dev);
@@ -1129,6 +1160,14 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov;
}
+ mlx5_vhca_event_start(dev);
+
+ err = mlx5_sf_hw_table_create(dev);
+ if (err) {
+ mlx5_core_err(dev, "sf table create failed %d\n", err);
+ goto err_vhca;
+ }
+
err = mlx5_ec_init(dev);
if (err) {
mlx5_core_err(dev, "Failed to init embedded CPU\n");
@@ -1141,11 +1180,16 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_sriov;
}
+ mlx5_sf_dev_table_create(dev);
+
return 0;
err_sriov:
mlx5_ec_cleanup(dev);
err_ec:
+ mlx5_sf_hw_table_destroy(dev);
+err_vhca:
+ mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
err_fs:
mlx5_accel_tls_cleanup(dev);
@@ -1171,8 +1215,11 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_ec_cleanup(dev);
+ mlx5_sf_hw_table_destroy(dev);
+ mlx5_vhca_event_stop(dev);
mlx5_cleanup_fs(dev);
mlx5_accel_ipsec_cleanup(dev);
mlx5_accel_tls_cleanup(dev);
@@ -1283,7 +1330,7 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
-static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
+int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
struct mlx5_priv *priv = &dev->priv;
int err;
@@ -1305,6 +1352,8 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
+ INIT_LIST_HEAD(&priv->traps);
+
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
@@ -1333,7 +1382,7 @@ err_health_init:
return err;
}
-static void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
+void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -1396,7 +1445,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
pci_save_state(pdev);
- devlink_reload_enable(devlink);
+ if (!mlx5_core_is_mp_slave(dev))
+ devlink_reload_enable(devlink);
return 0;
err_load_one:
@@ -1676,6 +1726,10 @@ static int __init init(void)
if (err)
goto err_debug;
+ err = mlx5_sf_driver_register();
+ if (err)
+ goto err_sf;
+
#ifdef CONFIG_MLX5_CORE_EN
err = mlx5e_init();
if (err) {
@@ -1686,6 +1740,8 @@ static int __init init(void)
return 0;
+err_sf:
+ pci_unregister_driver(&mlx5_core_driver);
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -1696,6 +1752,7 @@ static void __exit cleanup(void)
#ifdef CONFIG_MLX5_CORE_EN
mlx5e_cleanup();
#endif
+ mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0a0302ce7144..efe403c7e354 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -117,6 +117,8 @@ enum mlx5_semaphore_space_address {
MLX5_SEMAPHORE_SW_RESET = 0x20,
};
+#define MLX5_DEFAULT_PROF 2
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_query_board_id(struct mlx5_core_dev *dev);
int mlx5_cmd_init(struct mlx5_core_dev *dev);
@@ -176,6 +178,7 @@ struct cpumask *
mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
@@ -257,6 +260,17 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_SF;
+}
+
+int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
+void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup);
int mlx5_load_one(struct mlx5_core_dev *dev, bool boot);
+
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out);
+
+void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 6fd974920394..a61e09aff152 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -30,6 +30,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *irq_table;
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
if (!irq_table)
return -ENOMEM;
@@ -40,6 +43,9 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev)
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
+ if (mlx5_core_is_sf(dev))
+ return;
+
kvfree(dev->priv.irq_table);
}
@@ -268,6 +274,9 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
int nvec;
int err;
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
MLX5_IRQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
@@ -319,6 +328,9 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
struct mlx5_irq_table *table = dev->priv.irq_table;
int i;
+ if (mlx5_core_is_sf(dev))
+ return;
+
/* free_irq requires that affinity and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
@@ -332,3 +344,11 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
kfree(table->irq);
}
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(dev))
+ return dev->priv.parent_mdev->priv.irq_table;
+#endif
+ return dev->priv.irq_table;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
new file mode 100644
index 000000000000..0777be24a307
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "qos.h"
+
+#define MLX5_QOS_DEFAULT_DWRR_UID 0
+
+bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, qos))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_sq_scheduling))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_bw_share))
+ return false;
+ if (!MLX5_CAP_QOS(mdev, nic_rate_limit))
+ return false;
+ return true;
+}
+
+int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
+{
+ return 1 << MLX5_CAP_QOS(mdev, log_max_qos_nic_queue_group);
+}
+
+int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_QUEUE_GROUP);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id);
+}
+
+int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ void *attr;
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, element_type,
+ SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ attr = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes);
+ MLX5_SET(tsar_element, attr, tsar_type, TSAR_ELEMENT_TSAR_TYPE_DWRR);
+
+ return mlx5_create_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id);
+}
+
+int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
+{
+ return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
+}
+
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 id)
+{
+ u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+ u32 bitmask = 0;
+
+ MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
+ MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
+ MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
+
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+
+ return mlx5_modify_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC,
+ sched_ctx, id, bitmask);
+}
+
+int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id)
+{
+ return mlx5_destroy_scheduling_element_cmd(mdev, SCHEDULING_HIERARCHY_NIC, id);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
new file mode 100644
index 000000000000..125e4e47e6f7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qos.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_QOS_H
+#define __MLX5_QOS_H
+
+#include "mlx5_core.h"
+
+#define MLX5_DEBUG_QOS_MASK BIT(4)
+
+#define qos_err(mdev, fmt, ...) \
+ mlx5_core_err(mdev, "QoS: " fmt, ##__VA_ARGS__)
+#define qos_warn(mdev, fmt, ...) \
+ mlx5_core_warn(mdev, "QoS: " fmt, ##__VA_ARGS__)
+#define qos_dbg(mdev, fmt, ...) \
+ mlx5_core_dbg_mask(mdev, MLX5_DEBUG_QOS_MASK, "QoS: " fmt, ##__VA_ARGS__)
+
+bool mlx5_qos_is_supported(struct mlx5_core_dev *mdev);
+int mlx5_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
+
+int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id);
+int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
+ u32 bw_share, u32 max_avg_bw, u32 *id);
+int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+ u32 max_avg_bw, u32 id);
+int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c
new file mode 100644
index 000000000000..a8d75c2f0275
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/cmd.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "priv.h"
+
+int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_sf_in)] = {};
+
+ MLX5_SET(alloc_sf_in, in, opcode, MLX5_CMD_OP_ALLOC_SF);
+ MLX5_SET(alloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_sf_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_sf_in)] = {};
+
+ MLX5_SET(dealloc_sf_in, in, opcode, MLX5_CMD_OP_DEALLOC_SF);
+ MLX5_SET(dealloc_sf_in, in, function_id, function_id);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+ u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
+
+ MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+ MLX5_SET(enable_hca_in, in, function_id, func_id);
+ MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+ u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};
+
+ MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+ MLX5_SET(disable_hca_in, in, function_id, func_id);
+ MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
new file mode 100644
index 000000000000..b265f27b2166
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_core.h"
+#include "dev.h"
+#include "sf/vhca_event.h"
+#include "sf/sf.h"
+#include "sf/mlx5_ifc_vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf_dev_table {
+ struct xarray devices;
+ unsigned int max_sfs;
+ phys_addr_t base_address;
+ u64 sf_bar_length;
+ struct notifier_block nb;
+ struct mlx5_core_dev *dev;
+};
+
+static bool mlx5_sf_dev_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf) && mlx5_vhca_event_supported(dev);
+}
+
+bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ if (!mlx5_sf_dev_supported(dev))
+ return false;
+
+ return !xa_empty(&table->devices);
+}
+
+static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev);
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum);
+}
+static DEVICE_ATTR_RO(sfnum);
+
+static struct attribute *sf_device_attrs[] = {
+ &dev_attr_sfnum.attr,
+ NULL,
+};
+
+static const struct attribute_group sf_attr_group = {
+ .attrs = sf_device_attrs,
+};
+
+static const struct attribute_group *sf_attr_groups[2] = {
+ &sf_attr_group,
+ NULL
+};
+
+static void mlx5_sf_dev_release(struct device *device)
+{
+ struct auxiliary_device *adev = container_of(device, struct auxiliary_device, dev);
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ mlx5_adev_idx_free(adev->id);
+ kfree(sf_dev);
+}
+
+static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev)
+{
+ auxiliary_device_delete(&sf_dev->adev);
+ auxiliary_device_uninit(&sf_dev->adev);
+}
+
+static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+ struct mlx5_sf_dev *sf_dev;
+ struct pci_dev *pdev;
+ int err;
+ int id;
+
+ id = mlx5_adev_idx_alloc();
+ if (id < 0) {
+ err = id;
+ goto add_err;
+ }
+
+ sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+ if (!sf_dev) {
+ mlx5_adev_idx_free(id);
+ err = -ENOMEM;
+ goto add_err;
+ }
+ pdev = dev->pdev;
+ sf_dev->adev.id = id;
+ sf_dev->adev.name = MLX5_SF_DEV_ID_NAME;
+ sf_dev->adev.dev.release = mlx5_sf_dev_release;
+ sf_dev->adev.dev.parent = &pdev->dev;
+ sf_dev->adev.dev.groups = sf_attr_groups;
+ sf_dev->sfnum = sfnum;
+ sf_dev->parent_mdev = dev;
+
+ if (!table->max_sfs) {
+ mlx5_adev_idx_free(id);
+ kfree(sf_dev);
+ err = -EOPNOTSUPP;
+ goto add_err;
+ }
+ sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
+
+ err = auxiliary_device_init(&sf_dev->adev);
+ if (err) {
+ mlx5_adev_idx_free(id);
+ kfree(sf_dev);
+ goto add_err;
+ }
+
+ err = auxiliary_device_add(&sf_dev->adev);
+ if (err) {
+ put_device(&sf_dev->adev.dev);
+ goto add_err;
+ }
+
+ err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL);
+ if (err)
+ goto xa_err;
+ return;
+
+xa_err:
+ mlx5_sf_dev_remove(sf_dev);
+add_err:
+ mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
+ sf_index, sfnum, err);
+}
+
+static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev, u16 sf_index)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ xa_erase(&table->devices, sf_index);
+ mlx5_sf_dev_remove(sf_dev);
+}
+
+static int
+mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_code, void *data)
+{
+ struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);
+ const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_dev *sf_dev;
+ u16 sf_index;
+
+ sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
+ sf_dev = xa_load(&table->devices, sf_index);
+ switch (event->new_vhca_state) {
+ case MLX5_VHCA_STATE_ALLOCATED:
+ if (sf_dev)
+ mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ break;
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ if (sf_dev)
+ mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
+ else
+ mlx5_core_err(table->dev,
+ "SF DEV: teardown state for invalid dev index=%d fn_id=0x%x\n",
+ sf_index, event->sw_function_id);
+ break;
+ case MLX5_VHCA_STATE_ACTIVE:
+ if (!sf_dev)
+ mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
+{
+ struct mlx5_core_dev *dev = table->dev;
+ u16 max_functions;
+ u16 function_id;
+ int err = 0;
+ bool ecpu;
+ int i;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ function_id = MLX5_CAP_GEN(dev, sf_base_id);
+ ecpu = mlx5_read_embedded_cpu(dev);
+ /* Arm the vhca context as the vhca event notifier */
+ for (i = 0; i < max_functions; i++) {
+ err = mlx5_vhca_event_arm(dev, function_id, ecpu);
+ if (err)
+ return err;
+
+ function_id++;
+ }
+ return 0;
+}
+
+void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table;
+ unsigned int max_sfs;
+ int err;
+
+ if (!mlx5_sf_dev_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table) {
+ err = -ENOMEM;
+ goto table_err;
+ }
+
+ table->nb.notifier_call = mlx5_sf_dev_state_change_handler;
+ table->dev = dev;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ max_sfs = MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ max_sfs = 1 << MLX5_CAP_GEN(dev, log_max_sf);
+ table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12);
+ table->base_address = pci_resource_start(dev->pdev, 2);
+ table->max_sfs = max_sfs;
+ xa_init(&table->devices);
+ dev->priv.sf_dev_table = table;
+
+ err = mlx5_vhca_event_notifier_register(dev, &table->nb);
+ if (err)
+ goto vhca_err;
+ err = mlx5_sf_dev_vhca_arm_all(table);
+ if (err)
+ goto arm_err;
+ mlx5_core_dbg(dev, "SF DEV: max sf devices=%d\n", max_sfs);
+ return;
+
+arm_err:
+ mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+vhca_err:
+ table->max_sfs = 0;
+ kfree(table);
+ dev->priv.sf_dev_table = NULL;
+table_err:
+ mlx5_core_err(dev, "SF DEV table create err = %d\n", err);
+}
+
+static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
+{
+ struct mlx5_sf_dev *sf_dev;
+ unsigned long index;
+
+ xa_for_each(&table->devices, index, sf_dev) {
+ xa_erase(&table->devices, index);
+ mlx5_sf_dev_remove(sf_dev);
+ }
+}
+
+void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(dev, &table->nb);
+
+ /* Now that the event handler is not running, it is safe to destroy
+ * the SF devices without racing with it.
+ */
+ mlx5_sf_dev_destroy_all(table);
+
+ WARN_ON(!xa_empty(&table->devices));
+ kfree(table);
+ dev->priv.sf_dev_table = NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
new file mode 100644
index 000000000000..4de02902aef1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_DEV_H__
+#define __MLX5_SF_DEV_H__
+
+#ifdef CONFIG_MLX5_SF
+
+#include <linux/auxiliary_bus.h>
+
+#define MLX5_SF_DEV_ID_NAME "sf"
+
+struct mlx5_sf_dev {
+ struct auxiliary_device adev;
+ struct mlx5_core_dev *parent_mdev;
+ struct mlx5_core_dev *mdev;
+ phys_addr_t bar_base_addr;
+ u32 sfnum;
+};
+
+void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_sf_driver_register(void);
+void mlx5_sf_driver_unregister(void);
+
+bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev);
+
+#else
+
+static inline void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_sf_dev_table_destroy(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_driver_register(void)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_driver_unregister(void)
+{
+}
+
+static inline bool mlx5_sf_dev_allocated(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
new file mode 100644
index 000000000000..c4bf555c25ea
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/device.h>
+#include "mlx5_core.h"
+#include "dev.h"
+#include "devlink.h"
+
+static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct mlx5_core_dev *mdev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = mlx5_devlink_alloc();
+ if (!devlink)
+ return -ENOMEM;
+
+ mdev = devlink_priv(devlink);
+ mdev->device = &adev->dev;
+ mdev->pdev = sf_dev->parent_mdev->pdev;
+ mdev->bar_addr = sf_dev->bar_base_addr;
+ mdev->iseg_base = sf_dev->bar_base_addr;
+ mdev->coredev_type = MLX5_COREDEV_SF;
+ mdev->priv.parent_mdev = sf_dev->parent_mdev;
+ mdev->priv.adev_idx = adev->id;
+ sf_dev->mdev = mdev;
+
+ err = mlx5_mdev_init(mdev, MLX5_DEFAULT_PROF);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_mdev_init on err=%d\n", err);
+ goto mdev_err;
+ }
+
+ mdev->iseg = ioremap(mdev->iseg_base, sizeof(*mdev->iseg));
+ if (!mdev->iseg) {
+ mlx5_core_warn(mdev, "remap error\n");
+ err = -ENOMEM;
+ goto remap_err;
+ }
+
+ err = mlx5_load_one(mdev, true);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_load_one err=%d\n", err);
+ goto load_one_err;
+ }
+ return 0;
+
+load_one_err:
+ iounmap(mdev->iseg);
+remap_err:
+ mlx5_mdev_uninit(mdev);
+mdev_err:
+ mlx5_devlink_free(devlink);
+ return err;
+}
+
+static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+ struct devlink *devlink;
+
+ devlink = priv_to_devlink(sf_dev->mdev);
+ mlx5_unload_one(sf_dev->mdev, true);
+ iounmap(sf_dev->mdev->iseg);
+ mlx5_mdev_uninit(sf_dev->mdev);
+ mlx5_devlink_free(devlink);
+}
+
+static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
+{
+ struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+
+ mlx5_unload_one(sf_dev->mdev, false);
+}
+
+static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
+ { .name = MLX5_ADEV_NAME "." MLX5_SF_DEV_ID_NAME, },
+ { },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, mlx5_sf_dev_id_table);
+
+static struct auxiliary_driver mlx5_sf_driver = {
+ .name = MLX5_SF_DEV_ID_NAME,
+ .probe = mlx5_sf_dev_probe,
+ .remove = mlx5_sf_dev_remove,
+ .shutdown = mlx5_sf_dev_shutdown,
+ .id_table = mlx5_sf_dev_id_table,
+};
+
+int mlx5_sf_driver_register(void)
+{
+ return auxiliary_driver_register(&mlx5_sf_driver);
+}
+
+void mlx5_sf_driver_unregister(void)
+{
+ auxiliary_driver_unregister(&mlx5_sf_driver);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
new file mode 100644
index 000000000000..c2ba41bb7a70
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "eswitch.h"
+#include "priv.h"
+#include "sf/dev/dev.h"
+#include "mlx5_ifc_vhca_event.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf {
+ struct devlink_port dl_port;
+ unsigned int port_index;
+ u16 id;
+ u16 hw_fn_id;
+ u16 hw_state;
+};
+
+struct mlx5_sf_table {
+ struct mlx5_core_dev *dev; /* To refer from notifier context. */
+ struct xarray port_indices; /* port index based lookup. */
+ refcount_t refcount;
+ struct completion disable_complete;
+ struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
+ struct notifier_block esw_nb;
+ struct notifier_block vhca_nb;
+ u8 ecpu: 1;
+};
+
+static struct mlx5_sf *
+mlx5_sf_lookup_by_index(struct mlx5_sf_table *table, unsigned int port_index)
+{
+ return xa_load(&table->port_indices, port_index);
+}
+
+static struct mlx5_sf *
+mlx5_sf_lookup_by_function_id(struct mlx5_sf_table *table, unsigned int fn_id)
+{
+ unsigned long index;
+ struct mlx5_sf *sf;
+
+ xa_for_each(&table->port_indices, index, sf) {
+ if (sf->hw_fn_id == fn_id)
+ return sf;
+ }
+ return NULL;
+}
+
+static int mlx5_sf_id_insert(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ return xa_insert(&table->port_indices, sf->port_index, sf, GFP_KERNEL);
+}
+
+static void mlx5_sf_id_erase(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ xa_erase(&table->port_indices, sf->port_index);
+}
+
+static struct mlx5_sf *
+mlx5_sf_alloc(struct mlx5_sf_table *table, u32 sfnum, struct netlink_ext_ack *extack)
+{
+ unsigned int dl_port_index;
+ struct mlx5_sf *sf;
+ u16 hw_fn_id;
+ int id_err;
+ int err;
+
+ id_err = mlx5_sf_hw_table_sf_alloc(table->dev, sfnum);
+ if (id_err < 0) {
+ err = id_err;
+ goto id_err;
+ }
+
+ sf = kzalloc(sizeof(*sf), GFP_KERNEL);
+ if (!sf) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ sf->id = id_err;
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sf->id);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(table->dev, hw_fn_id);
+ sf->port_index = dl_port_index;
+ sf->hw_fn_id = hw_fn_id;
+ sf->hw_state = MLX5_VHCA_STATE_ALLOCATED;
+
+ err = mlx5_sf_id_insert(table, sf);
+ if (err)
+ goto insert_err;
+
+ return sf;
+
+insert_err:
+ kfree(sf);
+alloc_err:
+ mlx5_sf_hw_table_sf_free(table->dev, id_err);
+id_err:
+ if (err == -EEXIST)
+ NL_SET_ERR_MSG_MOD(extack, "SF already exist. Choose different sfnum");
+ return ERR_PTR(err);
+}
+
+static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ mlx5_sf_id_erase(table, sf);
+ mlx5_sf_hw_table_sf_free(table->dev, sf->id);
+ kfree(sf);
+}
+
+static struct mlx5_sf_table *mlx5_sf_table_try_get(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return NULL;
+
+ return refcount_inc_not_zero(&table->refcount) ? table : NULL;
+}
+
+static void mlx5_sf_table_put(struct mlx5_sf_table *table)
+{
+ if (refcount_dec_and_test(&table->refcount))
+ complete(&table->disable_complete);
+}
+
+static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
+{
+ switch (hw_state) {
+ case MLX5_VHCA_STATE_ACTIVE:
+ case MLX5_VHCA_STATE_IN_USE:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ return DEVLINK_PORT_FN_STATE_ACTIVE;
+ case MLX5_VHCA_STATE_INVALID:
+ case MLX5_VHCA_STATE_ALLOCATED:
+ default:
+ return DEVLINK_PORT_FN_STATE_INACTIVE;
+ }
+}
+
+static enum devlink_port_fn_opstate mlx5_sf_to_devlink_opstate(u8 hw_state)
+{
+ switch (hw_state) {
+ case MLX5_VHCA_STATE_IN_USE:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
+ return DEVLINK_PORT_FN_OPSTATE_ATTACHED;
+ case MLX5_VHCA_STATE_INVALID:
+ case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_ACTIVE:
+ default:
+ return DEVLINK_PORT_FN_OPSTATE_DETACHED;
+ }
+}
+
+static bool mlx5_sf_is_active(const struct mlx5_sf *sf)
+{
+ return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE;
+}
+
+int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err = 0;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table)
+ return -EOPNOTSUPP;
+
+ sf = mlx5_sf_lookup_by_index(table, dl_port->index);
+ if (!sf) {
+ err = -EOPNOTSUPP;
+ goto sf_err;
+ }
+ mutex_lock(&table->sf_state_lock);
+ *state = mlx5_sf_to_devlink_state(sf->hw_state);
+ *opstate = mlx5_sf_to_devlink_opstate(sf->hw_state);
+ mutex_unlock(&table->sf_state_lock);
+sf_err:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+{
+ int err;
+
+ if (mlx5_sf_is_active(sf))
+ return 0;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
+ return -EINVAL;
+
+ err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
+ if (err)
+ return err;
+
+ sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
+ return 0;
+}
+
+static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+{
+ int err;
+
+ if (!mlx5_sf_is_active(sf))
+ return 0;
+
+ err = mlx5_cmd_sf_disable_hca(dev, sf->hw_fn_id);
+ if (err)
+ return err;
+
+ sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
+ return 0;
+}
+
+static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ struct mlx5_sf *sf,
+ enum devlink_port_fn_state state)
+{
+ int err = 0;
+
+ mutex_lock(&table->sf_state_lock);
+ if (state == mlx5_sf_to_devlink_state(sf->hw_state))
+ goto out;
+ if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
+ err = mlx5_sf_activate(dev, sf);
+ else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
+ err = mlx5_sf_deactivate(dev, sf);
+ else
+ err = -EINVAL;
+out:
+ mutex_unlock(&table->sf_state_lock);
+ return err;
+}
+
+int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port state set is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ sf = mlx5_sf_lookup_by_index(table, dl_port->index);
+ if (!sf) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ err = mlx5_sf_state_set(dev, table, sf, state);
+out:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index)
+{
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_sf *sf;
+ u16 hw_fn_id;
+ int err;
+
+ sf = mlx5_sf_alloc(table, new_attr->sfnum, extack);
+ if (IS_ERR(sf))
+ return PTR_ERR(sf);
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, sf->id);
+ err = mlx5_esw_offloads_sf_vport_enable(esw, &sf->dl_port, hw_fn_id, new_attr->sfnum);
+ if (err)
+ goto esw_err;
+ *new_port_index = sf->port_index;
+ return 0;
+
+esw_err:
+ mlx5_sf_free(table, sf);
+ return err;
+}
+
+static int
+mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack)
+{
+ if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) {
+ NL_SET_ERR_MSG_MOD(extack, "Driver supports only SF port addition");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->port_index_valid) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Driver does not support user defined port index assignment");
+ return -EOPNOTSUPP;
+ }
+ if (!new_attr->sfnum_valid) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "User must provide unique sfnum. Driver does not support auto assignment");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->controller_valid && new_attr->controller) {
+ NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
+ return -EOPNOTSUPP;
+ }
+ if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int mlx5_devlink_sf_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *new_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_sf_table *table;
+ int err;
+
+ err = mlx5_sf_new_check_attr(dev, new_attr, extack);
+ if (err)
+ return err;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
+{
+ if (sf->hw_state == MLX5_VHCA_STATE_ALLOCATED) {
+ mlx5_sf_free(table, sf);
+ } else if (mlx5_sf_is_active(sf)) {
+ /* Even if it is active, it is treated as in_use because, by the time
+ * it is disabled here, it may still be in use. So it is safe to
+ * always wait for the event to ensure that it is recycled only after
+ * firmware confirms that it has been detached by the driver.
+ */
+ mlx5_cmd_sf_disable_hca(table->dev, sf->hw_fn_id);
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ kfree(sf);
+ } else {
+ mlx5_sf_hw_table_sf_deferred_free(table->dev, sf->id);
+ kfree(sf);
+ }
+}
+
+int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ struct mlx5_sf_table *table;
+ struct mlx5_sf *sf;
+ int err = 0;
+
+ table = mlx5_sf_table_try_get(dev);
+ if (!table) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
+ return -EOPNOTSUPP;
+ }
+ sf = mlx5_sf_lookup_by_index(table, port_index);
+ if (!sf) {
+ err = -ENODEV;
+ goto sf_err;
+ }
+
+ mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_sf_id_erase(table, sf);
+
+ mutex_lock(&table->sf_state_lock);
+ mlx5_sf_dealloc(table, sf);
+ mutex_unlock(&table->sf_state_lock);
+sf_err:
+ mlx5_sf_table_put(table);
+ return err;
+}
+
+static bool mlx5_sf_state_update_check(const struct mlx5_sf *sf, u8 new_state)
+{
+ if (sf->hw_state == MLX5_VHCA_STATE_ACTIVE && new_state == MLX5_VHCA_STATE_IN_USE)
+ return true;
+
+ if (sf->hw_state == MLX5_VHCA_STATE_IN_USE && new_state == MLX5_VHCA_STATE_ACTIVE)
+ return true;
+
+ if (sf->hw_state == MLX5_VHCA_STATE_TEARDOWN_REQUEST &&
+ new_state == MLX5_VHCA_STATE_ALLOCATED)
+ return true;
+
+ return false;
+}
+
+static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
+{
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, vhca_nb);
+ const struct mlx5_vhca_state_event *event = data;
+ bool update = false;
+ struct mlx5_sf *sf;
+
+ table = mlx5_sf_table_try_get(table->dev);
+ if (!table)
+ return 0;
+
+ mutex_lock(&table->sf_state_lock);
+ sf = mlx5_sf_lookup_by_function_id(table, event->function_id);
+ if (!sf)
+ goto sf_err;
+
+ /* When a driver is attached to or detached from a function, an event
+ * notifies us of such a state change.
+ */
+ update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
+ if (update)
+ sf->hw_state = event->new_vhca_state;
+sf_err:
+ mutex_unlock(&table->sf_state_lock);
+ mlx5_sf_table_put(table);
+ return 0;
+}
+
+static void mlx5_sf_table_enable(struct mlx5_sf_table *table)
+{
+ if (!mlx5_sf_max_functions(table->dev))
+ return;
+
+ init_completion(&table->disable_complete);
+ refcount_set(&table->refcount, 1);
+}
+
+static void mlx5_sf_deactivate_all(struct mlx5_sf_table *table)
+{
+ struct mlx5_eswitch *esw = table->dev->priv.eswitch;
+ unsigned long index;
+ struct mlx5_sf *sf;
+
+ /* At this point, no new user commands can start and no vhca event can
+ * arrive. It is safe to destroy all user created SFs.
+ */
+ xa_for_each(&table->port_indices, index, sf) {
+ mlx5_esw_offloads_sf_vport_disable(esw, sf->hw_fn_id);
+ mlx5_sf_id_erase(table, sf);
+ mlx5_sf_dealloc(table, sf);
+ }
+}
+
+static void mlx5_sf_table_disable(struct mlx5_sf_table *table)
+{
+ if (!mlx5_sf_max_functions(table->dev))
+ return;
+
+ if (!refcount_read(&table->refcount))
+ return;
+
+ /* Balances with refcount_set; drop the reference so that new user cmd cannot start
+ * and the new vhca event handler cannot run.
+ */
+ mlx5_sf_table_put(table);
+ wait_for_completion(&table->disable_complete);
+
+ mlx5_sf_deactivate_all(table);
+}
+
+static int mlx5_sf_esw_event(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct mlx5_sf_table *table = container_of(nb, struct mlx5_sf_table, esw_nb);
+ const struct mlx5_esw_event_info *mode = data;
+
+ switch (mode->new_mode) {
+ case MLX5_ESWITCH_OFFLOADS:
+ mlx5_sf_table_enable(table);
+ break;
+ case MLX5_ESWITCH_NONE:
+ mlx5_sf_table_disable(table);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static bool mlx5_sf_table_supported(const struct mlx5_core_dev *dev)
+{
+ return dev->priv.eswitch && MLX5_ESWITCH_MANAGER(dev) && mlx5_sf_supported(dev);
+}
+
+int mlx5_sf_table_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table;
+ int err;
+
+ if (!mlx5_sf_table_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ mutex_init(&table->sf_state_lock);
+ table->dev = dev;
+ xa_init(&table->port_indices);
+ dev->priv.sf_table = table;
+ refcount_set(&table->refcount, 0);
+ table->esw_nb.notifier_call = mlx5_sf_esw_event;
+ err = mlx5_esw_event_notifier_register(dev->priv.eswitch, &table->esw_nb);
+ if (err)
+ goto reg_err;
+
+ table->vhca_nb.notifier_call = mlx5_sf_vhca_event;
+ err = mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+ if (err)
+ goto vhca_err;
+
+ return 0;
+
+vhca_err:
+ mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
+reg_err:
+ mutex_destroy(&table->sf_state_lock);
+ kfree(table);
+ dev->priv.sf_table = NULL;
+ return err;
+}
+
+void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_table *table = dev->priv.sf_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ mlx5_esw_event_notifier_unregister(dev->priv.eswitch, &table->esw_nb);
+ WARN_ON(refcount_read(&table->refcount));
+ mutex_destroy(&table->sf_state_lock);
+ WARN_ON(!xa_empty(&table->port_indices));
+ kfree(table);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
new file mode 100644
index 000000000000..58b6be0b03d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+#include <linux/mlx5/driver.h>
+#include "vhca_event.h"
+#include "priv.h"
+#include "sf.h"
+#include "mlx5_ifc_vhca_event.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_sf_hw {
+ u32 usr_sfnum;
+ u8 allocated: 1;
+ u8 pending_delete: 1;
+};
+
+struct mlx5_sf_hw_table {
+ struct mlx5_core_dev *dev;
+ struct mlx5_sf_hw *sfs;
+ int max_local_functions;
+ u8 ecpu: 1;
+ struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
+ struct notifier_block vhca_nb;
+};
+
+u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id)
+{
+ return sw_id + mlx5_sf_start_function_id(dev);
+}
+
+static u16 mlx5_sf_hw_to_sw_id(const struct mlx5_core_dev *dev, u16 hw_id)
+{
+ return hw_id - mlx5_sf_start_function_id(dev);
+}
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ int sw_id = -ENOSPC;
+ u16 hw_fn_id;
+ int err;
+ int i;
+
+ if (!table->max_local_functions)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&table->table_lock);
+ /* Check if an SF with the same sfnum already exists. */
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (table->sfs[i].allocated && table->sfs[i].usr_sfnum == usr_sfnum) {
+ err = -EEXIST;
+ goto exist_err;
+ }
+ }
+
+ /* Find the free entry and allocate the entry from the array */
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (!table->sfs[i].allocated) {
+ table->sfs[i].usr_sfnum = usr_sfnum;
+ table->sfs[i].allocated = true;
+ sw_id = i;
+ break;
+ }
+ }
+ if (sw_id == -ENOSPC) {
+ err = -ENOSPC;
+ goto exist_err;
+ }
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
+ err = mlx5_cmd_alloc_sf(table->dev, hw_fn_id);
+ if (err)
+ goto err;
+
+ err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
+ if (err)
+ goto vhca_err;
+
+ mutex_unlock(&table->table_lock);
+ return sw_id;
+
+vhca_err:
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+err:
+ table->sfs[i].allocated = false;
+exist_err:
+ mutex_unlock(&table->table_lock);
+ return err;
+}
+
+static void _mlx5_sf_hw_id_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u16 hw_fn_id;
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, id);
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ table->sfs[id].allocated = false;
+ table->sfs[id].pending_delete = false;
+}
+
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ mutex_lock(&table->table_lock);
+ _mlx5_sf_hw_id_free(dev, id);
+ mutex_unlock(&table->table_lock);
+}
+
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ u16 hw_fn_id;
+ u8 state;
+ int err;
+
+ hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
+ mutex_lock(&table->table_lock);
+ err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
+ if (err)
+ goto err;
+ state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
+ if (state == MLX5_VHCA_STATE_ALLOCATED) {
+ mlx5_cmd_dealloc_sf(table->dev, hw_fn_id);
+ table->sfs[id].allocated = false;
+ } else {
+ table->sfs[id].pending_delete = true;
+ }
+err:
+ mutex_unlock(&table->table_lock);
+}
+
+static void mlx5_sf_hw_dealloc_all(struct mlx5_sf_hw_table *table)
+{
+ int i;
+
+ for (i = 0; i < table->max_local_functions; i++) {
+ if (table->sfs[i].allocated)
+ _mlx5_sf_hw_id_free(table->dev, i);
+ }
+}
+
+int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table;
+ struct mlx5_sf_hw *sfs;
+ int max_functions;
+
+ if (!mlx5_sf_supported(dev) || !mlx5_vhca_event_supported(dev))
+ return 0;
+
+ max_functions = mlx5_sf_max_functions(dev);
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ sfs = kcalloc(max_functions, sizeof(*sfs), GFP_KERNEL);
+ if (!sfs)
+ goto table_err;
+
+ mutex_init(&table->table_lock);
+ table->dev = dev;
+ table->sfs = sfs;
+ table->max_local_functions = max_functions;
+ table->ecpu = mlx5_read_embedded_cpu(dev);
+ dev->priv.sf_hw_table = table;
+ mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
+ return 0;
+
+table_err:
+ kfree(table);
+ return -ENOMEM;
+}
+
+void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return;
+
+ mutex_destroy(&table->table_lock);
+ kfree(table->sfs);
+ kfree(table);
+}
+
+static int mlx5_sf_hw_vhca_event(struct notifier_block *nb, unsigned long opcode, void *data)
+{
+ struct mlx5_sf_hw_table *table = container_of(nb, struct mlx5_sf_hw_table, vhca_nb);
+ const struct mlx5_vhca_state_event *event = data;
+ struct mlx5_sf_hw *sf_hw;
+ u16 sw_id;
+
+ if (event->new_vhca_state != MLX5_VHCA_STATE_ALLOCATED)
+ return 0;
+
+ sw_id = mlx5_sf_hw_to_sw_id(table->dev, event->function_id);
+ sf_hw = &table->sfs[sw_id];
+
+ mutex_lock(&table->table_lock);
+ /* The SF driver has notified through a firmware event that the SF is
+ * finally detached. Hence, recycle the SF hardware id for reuse.
+ */
+ if (sf_hw->allocated && sf_hw->pending_delete)
+ _mlx5_sf_hw_id_free(table->dev, sw_id);
+ mutex_unlock(&table->table_lock);
+ return 0;
+}
+
+int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return 0;
+
+ table->vhca_nb.notifier_call = mlx5_sf_hw_vhca_event;
+ return mlx5_vhca_event_notifier_register(table->dev, &table->vhca_nb);
+}
+
+void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
+{
+ struct mlx5_sf_hw_table *table = dev->priv.sf_hw_table;
+
+ if (!table)
+ return;
+
+ mlx5_vhca_event_notifier_unregister(table->dev, &table->vhca_nb);
+ /* Dealloc SFs whose firmware event has been missed. */
+ mlx5_sf_hw_dealloc_all(table);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
new file mode 100644
index 000000000000..1daf5a122ba3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_IFC_VHCA_EVENT_H__
+#define __MLX5_IFC_VHCA_EVENT_H__
+
+enum mlx5_ifc_vhca_state {
+ MLX5_VHCA_STATE_INVALID = 0x0,
+ MLX5_VHCA_STATE_ALLOCATED = 0x1,
+ MLX5_VHCA_STATE_ACTIVE = 0x2,
+ MLX5_VHCA_STATE_IN_USE = 0x3,
+ MLX5_VHCA_STATE_TEARDOWN_REQUEST = 0x4,
+};
+
+struct mlx5_ifc_vhca_state_context_bits {
+ u8 arm_change_event[0x1];
+ u8 reserved_at_1[0xb];
+ u8 vhca_state[0x4];
+ u8 reserved_at_10[0x10];
+
+ u8 sw_function_id[0x20];
+
+ u8 reserved_at_40[0x80];
+};
+
+struct mlx5_ifc_query_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
+};
+
+struct mlx5_ifc_query_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_vhca_state_field_select_bits {
+ u8 reserved_at_0[0x1e];
+ u8 sw_function_id[0x1];
+ u8 arm_change_event[0x1];
+};
+
+struct mlx5_ifc_modify_vhca_state_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_vhca_state_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 embedded_cpu_function[0x1];
+ u8 reserved_at_41[0xf];
+ u8 function_id[0x10];
+
+ struct mlx5_ifc_vhca_state_field_select_bits vhca_state_field_select;
+
+ struct mlx5_ifc_vhca_state_context_bits vhca_state_context;
+};
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
new file mode 100644
index 000000000000..cb02a51d0986
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/priv.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_PRIV_H__
+#define __MLX5_SF_PRIV_H__
+
+#include <linux/mlx5/driver.h>
+
+int mlx5_cmd_alloc_sf(struct mlx5_core_dev *dev, u16 function_id);
+int mlx5_cmd_dealloc_sf(struct mlx5_core_dev *dev, u16 function_id);
+
+int mlx5_cmd_sf_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_cmd_sf_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+
+u16 mlx5_sf_sw_to_hw_id(const struct mlx5_core_dev *dev, u16 sw_id);
+
+int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum);
+void mlx5_sf_hw_table_sf_free(struct mlx5_core_dev *dev, u16 id);
+void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
new file mode 100644
index 000000000000..0b6aea1e6a94
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_SF_H__
+#define __MLX5_SF_H__
+
+#include <linux/mlx5/driver.h>
+
+static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf_base_id);
+}
+
+#ifdef CONFIG_MLX5_SF
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf);
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ if (!mlx5_sf_supported(dev))
+ return 0;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ return MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ return 1 << MLX5_CAP_GEN(dev, log_max_sf);
+}
+
+#else
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef CONFIG_MLX5_SF_MANAGER
+
+int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev);
+
+int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev);
+void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev);
+
+int mlx5_sf_table_init(struct mlx5_core_dev *dev);
+void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
+
+int mlx5_devlink_sf_port_new(struct devlink *devlink,
+ const struct devlink_port_new_attrs *add_attr,
+ struct netlink_ext_ack *extack,
+ unsigned int *new_port_index);
+int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state *state,
+ enum devlink_port_fn_opstate *opstate,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port,
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack);
+#else
+
+static inline int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_hw_table_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_hw_table_create(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_hw_table_destroy(struct mlx5_core_dev *dev)
+{
+}
+
+static inline int mlx5_sf_table_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+#endif
+
+#endif
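A small usage sketch, assuming only the helpers declared above: the SF hardware function ids form the contiguous range [start, start + max).

#include <linux/printk.h>
#include "sf.h"

/* Illustrative only: walk the SF function-id range bounded by the two
 * capability helpers.
 */
static void example_walk_sf_ids(struct mlx5_core_dev *dev)
{
        u16 base = mlx5_sf_start_function_id(dev);
        u16 max = mlx5_sf_max_functions(dev);
        u16 i;

        if (!mlx5_sf_supported(dev))
                return;

        for (i = 0; i < max; i++)
                pr_debug("SF hw function id %u\n", base + i);
}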
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
new file mode 100644
index 000000000000..af2f2dd9db25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_ifc_vhca_event.h"
+#include "mlx5_core.h"
+#include "vhca_event.h"
+#include "ecpf.h"
+
+struct mlx5_vhca_state_notifier {
+ struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
+ struct blocking_notifier_head n_head;
+};
+
+struct mlx5_vhca_event_work {
+ struct work_struct work;
+ struct mlx5_vhca_state_notifier *notifier;
+ struct mlx5_vhca_state_event event;
+};
+
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *out, u32 outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
+
+ MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
+ MLX5_SET(query_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);
+
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *in, u32 inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
+ MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+
+ return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
+ MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
+ MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);
+
+ return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
+}
+
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
+
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
+ MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);
+
+ return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
+}
+
+static void
+mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *event)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
+ int err;
+
+ err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
+ if (err)
+ return;
+
+ event->sw_function_id = MLX5_GET(query_vhca_state_out, out,
+ vhca_state_context.sw_function_id);
+ event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
+ vhca_state_context.vhca_state);
+
+ mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);
+
+ blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
+}
+
+static void mlx5_vhca_state_work_handler(struct work_struct *_work)
+{
+ struct mlx5_vhca_event_work *work = container_of(_work, struct mlx5_vhca_event_work, work);
+ struct mlx5_vhca_state_notifier *notifier = work->notifier;
+ struct mlx5_core_dev *dev = notifier->dev;
+
+ mlx5_vhca_event_notify(dev, &work->event);
+}
+
+static int
+mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, void *data)
+{
+ struct mlx5_vhca_state_notifier *notifier =
+ mlx5_nb_cof(nb, struct mlx5_vhca_state_notifier, nb);
+ struct mlx5_vhca_event_work *work;
+ struct mlx5_eqe *eqe = data;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return NOTIFY_DONE;
+ INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
+ work->notifier = notifier;
+ work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
+ work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
+ mlx5_events_work_enqueue(notifier->dev, &work->work);
+ return NOTIFY_OK;
+}
+
+void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
+{
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ MLX5_SET(cmd_hca_cap, set_hca_cap, vhca_state, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_allocated, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_active, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_in_use, 1);
+ MLX5_SET(cmd_hca_cap, set_hca_cap, event_on_vhca_state_teardown_request, 1);
+}
+
+int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!mlx5_vhca_event_supported(dev))
+ return 0;
+
+ notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);
+ if (!notifier)
+ return -ENOMEM;
+
+ dev->priv.vhca_state_notifier = notifier;
+ notifier->dev = dev;
+ BLOCKING_INIT_NOTIFIER_HEAD(&notifier->n_head);
+ MLX5_NB_INIT(&notifier->nb, mlx5_vhca_state_change_notifier, VHCA_STATE_CHANGE);
+ return 0;
+}
+
+void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
+{
+ if (!mlx5_vhca_event_supported(dev))
+ return;
+
+ kfree(dev->priv.vhca_state_notifier);
+ dev->priv.vhca_state_notifier = NULL;
+}
+
+void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!dev->priv.vhca_state_notifier)
+ return;
+
+ notifier = dev->priv.vhca_state_notifier;
+ mlx5_eq_notifier_register(dev, &notifier->nb);
+}
+
+void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
+{
+ struct mlx5_vhca_state_notifier *notifier;
+
+ if (!dev->priv.vhca_state_notifier)
+ return;
+
+ notifier = dev->priv.vhca_state_notifier;
+ mlx5_eq_notifier_unregister(dev, &notifier->nb);
+}
+
+int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ if (!dev->priv.vhca_state_notifier)
+ return -EOPNOTSUPP;
+ return blocking_notifier_chain_register(&dev->priv.vhca_state_notifier->n_head, nb);
+}
+
+void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&dev->priv.vhca_state_notifier->n_head, nb);
+}
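The file above follows the usual mlx5 pattern for bridging an EQ interrupt into process context: the atomic EQE notifier allocates a work item with GFP_ATOMIC, and the work handler queries the new state, re-arms the event, and fans it out on a blocking notifier chain. A hedged sketch of a consumer of that chain; the callback and its registration wrapper are invented for illustration:

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ifc_vhca_event.h"
#include "vhca_event.h"

/* Illustrative consumer: runs from the blocking notifier chain in
 * process context, so it may sleep.
 */
static int example_vhca_state_cb(struct notifier_block *nb,
                                 unsigned long opcode, void *data)
{
        struct mlx5_vhca_state_event *event = data;

        if (event->new_vhca_state == MLX5_VHCA_STATE_ALLOCATED)
                pr_debug("SF function %u (sw id %u) allocated\n",
                         event->function_id, event->sw_function_id);
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_vhca_state_cb,
};

/* Registration fails with -EOPNOTSUPP when the device lacks vhca_state
 * support, matching mlx5_vhca_event_notifier_register() above.
 */
static int example_register(struct mlx5_core_dev *dev)
{
        return mlx5_vhca_event_notifier_register(dev, &example_nb);
}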
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
new file mode 100644
index 000000000000..1fe1ec6f4d4b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies Ltd */
+
+#ifndef __MLX5_VHCA_EVENT_H__
+#define __MLX5_VHCA_EVENT_H__
+
+#ifdef CONFIG_MLX5_SF
+
+struct mlx5_vhca_state_event {
+ u16 function_id;
+ u16 sw_function_id;
+ u8 new_vhca_state;
+ bool ecpu;
+};
+
+static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN_MAX(dev, vhca_state);
+}
+
+void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap);
+int mlx5_vhca_event_init(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
+void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
+int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
+void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
+ bool ecpu, u32 *out, u32 outlen);
+#else
+
+static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
+{
+}
+
+static inline int mlx5_vhca_event_init(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline void mlx5_vhca_event_cleanup(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_vhca_event_start(struct mlx5_core_dev *dev)
+{
+}
+
+static inline void mlx5_vhca_event_stop(struct mlx5_core_dev *dev)
+{
+}
+
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index df1363a34a42..28a7971cac6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -218,158 +218,6 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
},
};
-struct dr_action_modify_field_conv {
- u16 hw_field;
- u8 start;
- u8 end;
- u8 l3_type;
- u8 l4_type;
-};
-
-static const struct dr_action_modify_field_conv dr_action_conv_arr[] = {
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_1, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 32, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 16, .end = 47,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_0, .start = 0, .end = 15,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 0, .end = 5,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 48, .end = 56,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_1, .start = 8, .end = 15,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 0, .end = 15,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_0, .start = 16, .end = 31,
- .l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_3, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_4, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_2, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6,
- },
- [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 0, .end = 31,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L3_0, .start = 32, .end = 63,
- .l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_METADATA, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_0, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_REG_2, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 32, .end = 63,
- },
- [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L4_1, .start = 0, .end = 31,
- },
- [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
- .hw_field = MLX5DR_ACTION_MDFY_HW_FLD_L2_2, .start = 0, .end = 15,
- },
-};
-
-#define MAX_VLANS 2
-struct dr_action_vlan_info {
- int count;
- u32 headers[MAX_VLANS];
-};
-
-struct dr_action_apply_attr {
- u32 modify_index;
- u16 modify_actions;
- u32 decap_index;
- u16 decap_actions;
- u8 decap_with_vlan:1;
- u64 final_icm_addr;
- u32 flow_tag;
- u32 ctr_id;
- u16 gvmi;
- u16 hit_gvmi;
- u32 reformat_id;
- u32 reformat_size;
- struct dr_action_vlan_info vlans;
-};
-
static int
dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
enum mlx5dr_action_type *action_type)
@@ -394,141 +242,6 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
return 0;
}
-static void dr_actions_init_next_ste(u8 **last_ste,
- u32 *added_stes,
- enum mlx5dr_ste_entry_type entry_type,
- u16 gvmi)
-{
- (*added_stes)++;
- *last_ste += DR_STE_SIZE;
- mlx5dr_ste_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, entry_type, gvmi);
-}
-
-static void dr_actions_apply_tx(struct mlx5dr_domain *dmn,
- u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
-
- /* We want to make sure the modify header comes before L2
- * encapsulation. The reason for that is that we support
- * modify headers for outer headers only
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_push_vlan(last_ste,
- attr->vlans.headers[i],
- encap);
- }
- }
-
- if (encap) {
- /* Modify header and encapsulation require a different STEs.
- * Since modify header STE format doesn't support encapsulation
- * tunneling_action.
- */
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
- action_type_set[DR_ACTION_TYP_PUSH_VLAN])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_TX,
- attr->gvmi);
-
- mlx5dr_ste_set_tx_encap(last_ste,
- attr->reformat_id,
- attr->reformat_size,
- action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
- /* Whenever prio_tag_required enabled, we can be sure that the
- * previous table (ACL) already push vlan to our packet,
- * And due to HW limitation we need to set this bit, otherwise
- * push vlan + reformat will not work.
- */
- if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
- mlx5dr_ste_set_go_back_bit(last_ste);
- }
-
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-}
-
-static void dr_actions_apply_rx(u8 *action_type_set,
- u8 *last_ste,
- struct dr_action_apply_attr *attr,
- u32 *added_stes)
-{
- if (action_type_set[DR_ACTION_TYP_CTR])
- mlx5dr_ste_set_counter_id(last_ste, attr->ctr_id);
-
- if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
- mlx5dr_ste_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->decap_actions,
- attr->decap_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
- mlx5dr_ste_set_rx_decap(last_ste);
-
- if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
- int i;
-
- for (i = 0; i < attr->vlans.count; i++) {
- if (i ||
- action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
- action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_set_rx_pop_vlan(last_ste);
- }
- }
-
- if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_MODIFY_PKT,
- attr->gvmi);
- else
- mlx5dr_ste_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
-
- mlx5dr_ste_set_rewrite_actions(last_ste,
- attr->modify_actions,
- attr->modify_index);
- }
-
- if (action_type_set[DR_ACTION_TYP_TAG]) {
- if (mlx5dr_ste_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
- dr_actions_init_next_ste(&last_ste,
- added_stes,
- MLX5DR_STE_TYPE_RX,
- attr->gvmi);
-
- mlx5dr_ste_rx_set_flow_tag(last_ste, attr->flow_tag);
- }
-}
-
/* Apply the actions on the rule STE array starting from the last_ste.
* Actions might require more than one STE, new_num_stes will return
* the new size of the STEs array, rule with actions.
@@ -537,21 +250,20 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn,
enum mlx5dr_ste_entry_type ste_type,
u8 *action_type_set,
u8 *last_ste,
- struct dr_action_apply_attr *attr,
+ struct mlx5dr_ste_actions_attr *attr,
u32 *new_num_stes)
{
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
u32 added_stes = 0;
if (ste_type == MLX5DR_STE_TYPE_RX)
- dr_actions_apply_rx(action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
else
- dr_actions_apply_tx(dmn, action_type_set, last_ste, attr, &added_stes);
+ mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
+ last_ste, attr, &added_stes);
- last_ste += added_stes * DR_STE_SIZE;
*new_num_stes += added_stes;
-
- mlx5dr_ste_set_hit_gvmi(last_ste, attr->hit_gvmi);
- mlx5dr_ste_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
static enum dr_action_domain
@@ -643,9 +355,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 action_type_set[DR_ACTION_TYP_MAX] = {};
+ struct mlx5dr_ste_actions_attr attr = {};
struct mlx5dr_action *dest_action = NULL;
u32 state = DR_ACTION_STATE_NO_ACTION;
- struct dr_action_apply_attr attr = {};
enum dr_action_domain action_domain;
bool recalc_cs_required = false;
u8 *last_ste;
@@ -735,7 +447,8 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
case DR_ACTION_TYP_MODIFY_HDR:
attr.modify_index = action->rewrite.index;
attr.modify_actions = action->rewrite.num_of_actions;
- recalc_cs_required = action->rewrite.modify_ttl;
+ recalc_cs_required = action->rewrite.modify_ttl &&
+ !mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps);
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
@@ -756,12 +469,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
}
break;
case DR_ACTION_TYP_POP_VLAN:
- max_actions_type = MAX_VLANS;
+ max_actions_type = MLX5DR_MAX_VLANS;
attr.vlans.count++;
break;
case DR_ACTION_TYP_PUSH_VLAN:
- max_actions_type = MAX_VLANS;
- if (attr.vlans.count == MAX_VLANS)
+ max_actions_type = MLX5DR_MAX_VLANS;
+ if (attr.vlans.count == MLX5DR_MAX_VLANS)
return -EINVAL;
attr.vlans.headers[attr.vlans.count++] = action->push_vlan.vlan_hdr;
@@ -789,9 +502,9 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
*new_hw_ste_arr_sz = nic_matcher->num_of_builders;
last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
- /* Due to a HW bug, modifying TTL on RX flows will cause an incorrect
- * checksum calculation. In this case we will use a FW table to
- * recalculate.
+ /* Due to a HW bug in some devices, modifying TTL on RX flows will
+ * cause an incorrect checksum calculation. In this case we will
+ * use a FW table to recalculate.
*/
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
rx_rule && recalc_cs_required && dest_action) {
@@ -817,132 +530,6 @@ out_invalid_arg:
return -EINVAL;
}
-#define CVLAN_ETHERTYPE 0x8100
-#define SVLAN_ETHERTYPE 0x88a8
-#define HDR_LEN_L2_ONLY 14
-#define HDR_LEN_L2_VLAN 18
-#define REWRITE_HW_ACTION_NUM 6
-
-static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
- struct mlx5dr_action *action,
- void *data, size_t data_sz)
-{
- struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
- u64 ops[REWRITE_HW_ACTION_NUM] = {};
- u32 hdr_fld_4b;
- u16 hdr_fld_2b;
- u16 vlan_type;
- bool vlan;
- int i = 0;
- int ret;
-
- vlan = (data_sz != HDR_LEN_L2_ONLY);
-
- /* dmac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* smac_47_16 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 16);
- hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
- MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_4b);
- i++;
-
- /* dmac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- /* ethertype + (optional) vlan */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 32);
- if (!vlan) {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 16);
- } else {
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
- vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
- hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
- MLX5_SET(dr_action_hw_set, ops + i, inline_data, hdr_fld_4b);
- MLX5_SET(dr_action_hw_set, ops + i, destination_length, 18);
- }
- i++;
-
- /* smac_15_0 */
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_1);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- i++;
-
- if (vlan) {
- MLX5_SET(dr_action_hw_set, ops + i,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
- hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
- MLX5_SET(dr_action_hw_set, ops + i,
- inline_data, hdr_fld_2b);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_length, 16);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_field_code, MLX5DR_ACTION_MDFY_HW_FLD_L2_2);
- MLX5_SET(dr_action_hw_set, ops + i,
- destination_left_shifter, 0);
- i++;
- }
-
- action->rewrite.data = (void *)ops;
- action->rewrite.num_of_actions = i;
-
- ret = mlx5dr_send_postsend_action(dmn, action);
- if (ret) {
- mlx5dr_dbg(dmn, "Writing encapsulation action to ICM failed\n");
- return ret;
- }
-
- return 0;
-}
-
static struct mlx5dr_action *
dr_action_create_generic(enum mlx5dr_action_type action_type)
{
@@ -1217,21 +804,34 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
case DR_ACTION_TYP_TNL_L3_TO_L2:
{
- /* Only Ethernet frame is supported, with VLAN (18) or without (14) */
- if (data_sz != HDR_LEN_L2_ONLY && data_sz != HDR_LEN_L2_VLAN)
- return -EINVAL;
+ u8 hw_actions[ACTION_CACHE_LINE_SIZE] = {};
+ int ret;
+
+ ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
+ data, data_sz,
+ hw_actions,
+ ACTION_CACHE_LINE_SIZE,
+ &action->rewrite.num_of_actions);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
+ return ret;
+ }
action->rewrite.chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
DR_CHUNK_SIZE_8);
- if (!action->rewrite.chunk)
+ if (!action->rewrite.chunk) {
+ mlx5dr_dbg(dmn, "Failed allocating modify header chunk\n");
return -ENOMEM;
+ }
+ action->rewrite.data = (void *)hw_actions;
action->rewrite.index = (action->rewrite.chunk->icm_addr -
dmn->info.caps.hdr_modify_icm_addr) /
ACTION_CACHE_LINE_SIZE;
- ret = dr_actions_l2_rewrite(dmn, action, data, data_sz);
+ ret = mlx5dr_send_postsend_action(dmn, action);
if (ret) {
+ mlx5dr_dbg(dmn, "Writing decap l3 actions to ICM failed\n");
mlx5dr_icm_free_chunk(action->rewrite.chunk);
return ret;
}
@@ -1243,6 +843,9 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
}
+#define CVLAN_ETHERTYPE 0x8100
+#define SVLAN_ETHERTYPE 0x88a8
+
struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
{
return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
@@ -1315,31 +918,13 @@ dec_ref:
return NULL;
}
-static const struct dr_action_modify_field_conv *
-dr_action_modify_get_hw_info(u16 sw_field)
-{
- const struct dr_action_modify_field_conv *hw_action_info;
-
- if (sw_field >= ARRAY_SIZE(dr_action_conv_arr))
- goto not_found;
-
- hw_action_info = &dr_action_conv_arr[sw_field];
- if (!hw_action_info->end && !hw_action_info->start)
- goto not_found;
-
- return hw_action_info;
-
-not_found:
- return NULL;
-}
-
static int
dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 max_length;
u16 sw_field;
u32 data;
@@ -1349,7 +934,7 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
return -EINVAL;
@@ -1357,20 +942,12 @@ dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
max_length = hw_action_info->end - hw_action_info->start + 1;
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_ADD);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start);
-
- /* PRM defines that length zero specific length of 32bits */
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- max_length == 32 ? 0 : max_length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_add(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start,
+ max_length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1381,9 +958,9 @@ static int
dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_hw_info)
{
- const struct dr_action_modify_field_conv *hw_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_action_info;
u8 offset, length, max_length;
u16 sw_field;
u32 data;
@@ -1395,7 +972,7 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
data = MLX5_GET(set_action_in, sw_action, data);
/* Convert SW data to HW modify action format */
- hw_action_info = dr_action_modify_get_hw_info(sw_field);
+ hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
if (!hw_action_info) {
mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
return -EINVAL;
@@ -1411,19 +988,12 @@ dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_set, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_SET);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_field_code,
- hw_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter,
- hw_action_info->start + offset);
-
- MLX5_SET(dr_action_hw_set, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+ mlx5dr_ste_set_action_set(dmn->ste_ctx,
+ hw_action,
+ hw_action_info->hw_field,
+ hw_action_info->start + offset,
+ length,
+ data);
*ret_hw_info = hw_action_info;
@@ -1434,12 +1004,12 @@ static int
dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
u16 src_field, dst_field;
/* Get SW modify action data */
@@ -1450,8 +1020,8 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
length = MLX5_GET(copy_action_in, sw_action, length);
/* Convert SW data to HW modify action format */
- hw_src_action_info = dr_action_modify_get_hw_info(src_field);
- hw_dst_action_info = dr_action_modify_get_hw_info(dst_field);
+ hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
+ hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
if (!hw_src_action_info || !hw_dst_action_info) {
mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
return -EINVAL;
@@ -1471,23 +1041,13 @@ dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
return -EINVAL;
}
- MLX5_SET(dr_action_hw_copy, hw_action,
- opcode, MLX5DR_ACTION_MDFY_HW_OP_COPY);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code,
- hw_dst_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter,
- hw_dst_action_info->start + dst_offset);
-
- MLX5_SET(dr_action_hw_copy, hw_action, destination_length,
- length == 32 ? 0 : length);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_field_code,
- hw_src_action_info->hw_field);
-
- MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter,
- hw_src_action_info->start + dst_offset);
+ mlx5dr_ste_set_action_copy(dmn->ste_ctx,
+ hw_action,
+ hw_dst_action_info->hw_field,
+ hw_dst_action_info->start + dst_offset,
+ length,
+ hw_src_action_info->hw_field,
+ hw_src_action_info->start + src_offset);
*ret_dst_hw_info = hw_dst_action_info;
*ret_src_hw_info = hw_src_action_info;
@@ -1499,8 +1059,8 @@ static int
dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
__be64 *sw_action,
__be64 *hw_action,
- const struct dr_action_modify_field_conv **ret_dst_hw_info,
- const struct dr_action_modify_field_conv **ret_src_hw_info)
+ const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+ const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
{
u8 action;
int ret;
@@ -1677,15 +1237,15 @@ static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
u32 *num_hw_actions,
bool *modify_ttl)
{
- const struct dr_action_modify_field_conv *hw_dst_action_info;
- const struct dr_action_modify_field_conv *hw_src_action_info;
- u16 hw_field = MLX5DR_ACTION_MDFY_HW_FLD_RESERVED;
- u32 l3_type = MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE;
- u32 l4_type = MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE;
+ const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+ const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
struct mlx5dr_domain *dmn = action->rewrite.dmn;
int ret, i, hw_idx = 0;
__be64 *sw_action;
__be64 hw_action;
+ u16 hw_field = 0;
+ u32 l3_type = 0;
+ u32 l4_type = 0;
*modify_ttl = false;
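The dr_action.c changes above replace direct STE field writes with calls through dmn->ste_ctx, so each STE format version (STEv0 on ConnectX-5 class parts, STEv1 on ConnectX-6 Dx) can supply its own encoders. A hedged sketch of the ops-table idea with invented names; the real context type and its v0/v1 instances are introduced elsewhere in this series:

#include <linux/types.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Invented names; shows only the shape of the format-specific context. */
struct example_ste_ctx {
        void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
        void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
};

extern const struct example_ste_ctx example_ste_ctx_v0;  /* assumed */
extern const struct example_ste_ctx example_ste_ctx_v1;  /* assumed */

static const struct example_ste_ctx *example_get_ctx(u8 sw_format_ver)
{
        /* Unknown formats return NULL, and domain init then fails with
         * -EOPNOTSUPP, as in dr_domain_init_resources() below.
         */
        switch (sw_format_ver) {
        case MLX5_STEERING_FORMAT_CONNECTX_5:
                return &example_ste_ctx_v0;
        case MLX5_STEERING_FORMAT_CONNECTX_6DX:
                return &example_ste_ctx_v1;
        default:
                return NULL;
        }
}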
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index ba65ec406cfa..30b0136b5bc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -78,9 +78,9 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
caps->uplink_icm_address_tx =
MLX5_CAP64_ESW_FLOWTABLE(mdev,
sw_steering_uplink_icm_address_tx);
- caps->sw_owner =
- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
- sw_owner);
+ caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+ if (!caps->sw_owner_v2)
+ caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
return 0;
}
@@ -113,10 +113,15 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->nic_tx_allow_address =
MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
- caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
- caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
+ caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
+ caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
+
+ if (!caps->rx_sw_owner_v2)
+ caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
+ if (!caps->tx_sw_owner_v2)
+ caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
- caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
+ caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
caps->hdr_modify_icm_addr =
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index aa2c2d6c44e6..7091b1be84ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -4,6 +4,11 @@
#include <linux/mlx5/eswitch.h>
#include "dr_types.h"
+#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
+ ((dmn)->info.caps.dmn_type##_sw_owner || \
+ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
+ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
+
static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
@@ -57,6 +62,12 @@ static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
{
int ret;
+ dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
+ if (!dmn->ste_ctx) {
+ mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
if (ret) {
mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
@@ -181,6 +192,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
return ret;
dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
+ dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
@@ -223,18 +235,13 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (ret)
return ret;
- if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
- mlx5dr_err(dmn, "SW steering is not supported on this device\n");
- return -EOPNOTSUPP;
- }
-
ret = dr_domain_query_fdb_caps(mdev, dmn);
if (ret)
return ret;
switch (dmn->type) {
case MLX5DR_DOMAIN_TYPE_NIC_RX:
- if (!dmn->info.caps.rx_sw_owner)
+ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
@@ -243,7 +250,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
break;
case MLX5DR_DOMAIN_TYPE_NIC_TX:
- if (!dmn->info.caps.tx_sw_owner)
+ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
return -ENOTSUPP;
dmn->info.supp_sw_steering = true;
@@ -255,7 +262,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
if (!dmn->info.caps.eswitch_manager)
return -ENOTSUPP;
- if (!dmn->info.caps.fdb_sw_owner)
+ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
return -ENOTSUPP;
dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
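For clarity, the token-pasting macro above expands per domain type; written out for the NIC RX case as an equivalent helper (a mechanical expansion, assuming the caps fields from dr_types.h):

#include "dr_types.h"

/* Mechanical expansion of DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx):
 * the macro pastes "rx_" onto the capability names.
 */
static bool example_rx_sw_steering_supported(struct mlx5dr_domain *dmn)
{
        return dmn->info.caps.rx_sw_owner ||
               (dmn->info.caps.rx_sw_owner_v2 &&
                dmn->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX);
}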
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index 6527eb4df153..15673cd10039 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -113,7 +113,8 @@ dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
static bool
dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
}
static bool
@@ -135,7 +136,8 @@ static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
static bool
dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
}
static bool
@@ -148,12 +150,14 @@ dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
}
static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
{
- return caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED;
+ return (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+ (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
}
static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
@@ -221,6 +225,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_match_param mask = {};
struct mlx5dr_ste_build *sb;
bool inner, rx;
@@ -259,80 +264,89 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = false;
if (dr_mask_is_wqe_metadata_set(&mask.misc2))
- mlx5dr_ste_build_general_purpose(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
- mlx5dr_ste_build_register_0(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
- mlx5dr_ste_build_register_1(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
- mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
- dmn, inner, rx);
+ mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
+ &mask, dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (outer_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.outer))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
- mlx5dr_ste_build_tnl_vxlan_gpe(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
else if (dr_mask_is_tnl_geneve(&mask, dmn))
- mlx5dr_ste_build_tnl_geneve(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_icmp(&mask, dmn)) {
- ret = mlx5dr_ste_build_icmp(&sb[idx++],
+ ret = mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
&mask, &dmn->info.caps,
inner, rx);
if (ret)
return ret;
}
if (dr_mask_is_tnl_gre_set(&mask.misc))
- mlx5dr_ste_build_tnl_gre(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Inner */
@@ -343,50 +357,56 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
inner = true;
if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
- mlx5dr_ste_build_eth_l2_tnl(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- mlx5dr_ste_build_eth_l2_src_dst(&sb[idx++],
+ mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))
- mlx5dr_ste_build_eth_l2_src(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_l2_dst(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (inner_ipv == DR_RULE_IPV6) {
if (dr_mask_is_dst_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_dst(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_src_addr_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv6_src(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
- mlx5dr_ste_build_eth_ipv6_l3_l4(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
} else {
if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_5_tuple(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (dr_mask_is_ttl_set(&mask.inner))
- mlx5dr_ste_build_eth_l3_ipv4_misc(&sb[idx++], &mask,
- inner, rx);
+ mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
- mlx5dr_ste_build_eth_l4_misc(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
- mlx5dr_ste_build_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
if (DR_MASK_IS_TNL_MPLS_SET(mask.misc2))
- mlx5dr_ste_build_tnl_mpls(&sb[idx++], &mask, inner, rx);
+ mlx5dr_ste_build_tnl_mpls(ste_ctx, &sb[idx++],
+ &mask, inner, rx);
}
/* Empty matcher, takes all */
if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
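The dr_matcher_supp_*() changes above all follow one pattern: STEv1 devices (the CONNECTX_6DX steering format) match these headers natively, while older devices need the corresponding flex-parser capability bit. A minimal sketch of that shared check, with an invented helper name:

#include "dr_types.h"

/* Invented helper; shows the shared shape of the checks above. */
static bool example_supp_flex_proto(struct mlx5dr_cmd_caps *caps, u32 flex_bit)
{
        return caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_6DX ||
               (caps->flex_protocols & flex_bit);
}

/* e.g. example_supp_flex_proto(caps, MLX5_FLEX_PARSER_GENEVE_ENABLED) */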
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6d73719db1f4..b337d6626bff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -10,7 +10,8 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
-static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
+static int dr_rule_append_to_miss_list(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *new_last_ste,
struct list_head *miss_list,
struct list_head *send_list)
{
@@ -25,11 +26,11 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
if (!ste_info_last)
return -ENOMEM;
- mlx5dr_ste_set_miss_addr(last_ste->hw_ste,
+ mlx5dr_ste_set_miss_addr(ste_ctx, last_ste->hw_ste,
mlx5dr_ste_get_icm_addr(new_last_ste));
list_add_tail(&new_last_ste->miss_list_node, miss_list);
- mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_REDUCED,
+ mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
0, last_ste->hw_ste,
ste_info_last, send_list, true);
@@ -42,6 +43,7 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
u8 *hw_ste)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_htbl *new_htbl;
struct mlx5dr_ste *ste;
@@ -57,7 +59,8 @@ dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
/* One and only entry, never grows */
ste = new_htbl->ste_arr;
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
mlx5dr_htbl_get(new_htbl);
return ste;
@@ -103,14 +106,19 @@ dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
int ret;
list_del(&ste_info->send_list);
+
+ /* Copy the data to the STE: either the reduced size or just the
+ * control part; the last 16B (mask) are already written to the HW.
+ */
+ if (ste_info->size == DR_STE_SIZE_CTRL)
+ memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_CTRL);
+ else
+ memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
+
ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
ste_info->size, ste_info->offset);
if (ret)
goto out;
- /* Copy data to ste, only reduced size, the last 16B (mask)
- * is already written to the hw.
- */
- memcpy(ste_info->ste->hw_ste, ste_info->data, DR_STE_SIZE_REDUCED);
out:
kfree(ste_info);
@@ -169,6 +177,7 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste *col_ste,
u8 *hw_ste)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste *new_ste;
int ret;
@@ -180,11 +189,11 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste);
/* Update the previous from the list */
- ret = dr_rule_append_to_miss_list(new_ste,
+ ret = dr_rule_append_to_miss_list(dmn->ste_ctx, new_ste,
mlx5dr_ste_get_miss_list(col_ste),
update_list);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed update dup entry\n");
+ mlx5dr_dbg(dmn, "Failed update dup entry\n");
goto err_exit;
}
@@ -224,6 +233,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
struct mlx5dr_ste_htbl *new_htbl,
struct list_head *update_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
bool use_update_list = false;
u8 hw_ste[DR_STE_SIZE] = {};
@@ -237,7 +247,8 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
/* Copy STE control and tag */
memcpy(hw_ste, cur_ste->hw_ste, DR_STE_SIZE_REDUCED);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
@@ -253,7 +264,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_ste,
hw_ste);
if (!new_ste) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding collision entry, index: %d\n",
+ mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
new_idx);
return NULL;
}
@@ -391,7 +402,8 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* Write new table to HW */
info.type = CONNECT_MISS;
info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
new_htbl,
formatted_ste,
@@ -436,18 +448,20 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
* (48B len) which works only on first 32B
*/
- mlx5dr_ste_set_hit_addr(prev_htbl->ste_arr[0].hw_ste,
+ mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
+ prev_htbl->ste_arr[0].hw_ste,
new_htbl->chunk->icm_addr,
new_htbl->chunk->num_of_entries);
ste_to_update = &prev_htbl->ste_arr[0];
} else {
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_htbl->pointing_ste->hw_ste,
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ cur_htbl->pointing_ste->hw_ste,
new_htbl);
ste_to_update = cur_htbl->pointing_ste;
}
- mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_REDUCED,
+ mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
0, ste_to_update->hw_ste, ste_info,
update_list, false);
@@ -496,6 +510,8 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info *ste_info;
struct mlx5dr_ste *new_ste;
@@ -507,8 +523,9 @@ dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
if (!new_ste)
goto free_send_info;
- if (dr_rule_append_to_miss_list(new_ste, miss_list, send_list)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed to update prev miss_list\n");
+ if (dr_rule_append_to_miss_list(ste_ctx, new_ste,
+ miss_list, send_list)) {
+ mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
goto err_exit;
}
@@ -659,6 +676,7 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
u8 num_of_builders = nic_matcher->num_of_builders;
struct mlx5dr_matcher *matcher = rule->matcher;
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
u8 *curr_hw_ste, *prev_hw_ste;
struct mlx5dr_ste *action_ste;
int i, k, ret;
@@ -692,10 +710,12 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
goto err_exit;
/* Point current ste to the new action */
- mlx5dr_ste_set_hit_addr_by_next_htbl(prev_hw_ste, action_ste->htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+ prev_hw_ste,
+ action_ste->htbl);
ret = dr_rule_add_member(nic_rule, action_ste);
if (ret) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed adding rule member\n");
+ mlx5dr_dbg(dmn, "Failed adding rule member\n");
goto free_ste_info;
}
mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
@@ -722,6 +742,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
struct list_head *miss_list,
struct list_head *send_list)
{
+ struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_ste_send_info *ste_info;
/* Take ref on table, only on first time this ste is used */
@@ -730,7 +751,8 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
/* new entry -> new branch */
list_add_tail(&ste->miss_list_node, miss_list);
- mlx5dr_ste_set_miss_addr(hw_ste, nic_matcher->e_anchor->chunk->icm_addr);
+ mlx5dr_ste_set_miss_addr(dmn->ste_ctx, hw_ste,
+ nic_matcher->e_anchor->chunk->icm_addr);
ste->ste_chain_location = ste_location;
@@ -743,7 +765,7 @@ static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
ste,
hw_ste,
DR_CHUNK_SIZE_1)) {
- mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
+ mlx5dr_dbg(dmn, "Failed allocating table\n");
goto clean_ste_info;
}
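The switch from DR_STE_SIZE_REDUCED to DR_STE_SIZE_CTRL above relies on the STE layout constants from dr_types.h; the values below are my reading of that header, stated only to make the hunks easier to follow, not asserted by this patch:

/* Assumed STE layout (per dr_types.h):
 *   DR_STE_SIZE          64   full entry as written to ICM
 *   DR_STE_SIZE_CTRL     32   control section (hit/miss address, etc.)
 *   DR_STE_SIZE_MASK     16   per-hash-table bit mask, written separately
 *   DR_STE_SIZE_REDUCED  48   DR_STE_SIZE - DR_STE_SIZE_MASK
 * Linking a new STE into a miss list only rewrites the control section,
 * which is why dr_rule_append_to_miss_list() now posts DR_STE_SIZE_CTRL
 * bytes instead of the full reduced entry.
 */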
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 24dede1b0a20..83c4c877d558 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -431,6 +431,8 @@ int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
{
struct postsend_info send_info = {};
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);
+
send_info.write.addr = (uintptr_t)data;
send_info.write.length = size;
send_info.write.lkey = 0;
@@ -457,6 +459,8 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
if (ret)
return ret;
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);
+
/* Send the data 'iterations' times */
for (i = 0; i < iterations; i++) {
u32 ste_index = i * (byte_size / DR_STE_SIZE);
@@ -480,6 +484,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
/* Copy bit_mask */
memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
mask, DR_STE_SIZE_MASK);
+ /* Only when we have a mask do we need to re-arrange the STE */
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
+ data + (j * DR_STE_SIZE),
+ DR_STE_SIZE);
}
}
@@ -509,6 +517,7 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
u32 byte_size = htbl->chunk->byte_size;
int iterations;
int num_stes;
+ u8 *copy_dst;
u8 *data;
int ret;
int i;
@@ -518,20 +527,22 @@ int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
if (ret)
return ret;
- for (i = 0; i < num_stes; i++) {
- u8 *copy_dst;
-
- /* Copy the same ste on the data buffer */
- copy_dst = data + i * DR_STE_SIZE;
- memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
-
- if (update_hw_ste) {
- /* Copy the reduced ste to hash table ste_arr */
+ if (update_hw_ste) {
+ /* Copy the reduced STE to hash table ste_arr */
+ for (i = 0; i < num_stes; i++) {
copy_dst = htbl->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
}
}
+ mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);
+
+ /* Copy the same STE into the data buffer */
+ for (i = 0; i < num_stes; i++) {
+ copy_dst = data + i * DR_STE_SIZE;
+ memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
+ }
+
/* Send the data 'iterations' times */
for (i = 0; i < iterations; i++) {
u8 ste_index = i * (byte_size / DR_STE_SIZE);
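The reordering above is easiest to read with one assumption made explicit: prepare_for_postsend() may rewrite the STE pattern in place into the on-device layout, so the reduced copy kept in htbl->hw_ste_arr has to be taken before the conversion, and only the DMA buffer receives the converted bytes. A compact sketch of that ordering, with a stub standing in for the real callback and locally defined sizes:

#include <string.h>

#define SKETCH_STE_SIZE          64
#define SKETCH_STE_SIZE_REDUCED  48

/* Stand-in for ste_ctx->prepare_for_postsend(); assume it may permute
 * the buffer into the device layout in place.
 */
static void sketch_prepare_for_postsend(unsigned char *ste, unsigned int size)
{
	(void)ste;
	(void)size;
}

static void sketch_fill_formatted_htbl(unsigned char *hw_ste_arr,
				       unsigned char *data,
				       unsigned char *ste_init_data,
				       int num_stes, int update_hw_ste)
{
	int i;

	/* 1. Driver-side (reduced, unconverted) copies first */
	if (update_hw_ste)
		for (i = 0; i < num_stes; i++)
			memcpy(hw_ste_arr + i * SKETCH_STE_SIZE_REDUCED,
			       ste_init_data, SKETCH_STE_SIZE_REDUCED);

	/* 2. Convert the pattern once for the device */
	sketch_prepare_for_postsend(ste_init_data, SKETCH_STE_SIZE);

	/* 3. Replicate the converted pattern into the send buffer */
	for (i = 0; i < num_stes; i++)
		memcpy(data + i * SKETCH_STE_SIZE,
		       ste_init_data, SKETCH_STE_SIZE);
}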
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index d275823bff2f..f49abc7a4b9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -3,104 +3,7 @@
#include <linux/types.h>
#include <linux/crc32.h>
-#include "dr_types.h"
-
-#define DR_STE_CRC_POLY 0xEDB88320L
-#define STE_IPV4 0x1
-#define STE_IPV6 0x2
-#define STE_TCP 0x1
-#define STE_UDP 0x2
-#define STE_SPI 0x3
-#define IP_VERSION_IPV4 0x4
-#define IP_VERSION_IPV6 0x6
-#define STE_SVLAN 0x1
-#define STE_CVLAN 0x2
-
-#define DR_STE_ENABLE_FLOW_TAG BIT(31)
-
-/* Set to STE a specific value using DR_STE_SET */
-#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
- if ((spec)->s_fname) { \
- MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
- (spec)->s_fname = 0; \
- } \
-} while (0)
-
-/* Set to STE spec->s_fname to tag->t_fname */
-#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
-
-/* Set to STE -1 to bit_mask->bm_fname and set spec->s_fname as used */
-#define DR_STE_SET_MASK(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, -1)
-
-/* Set to STE spec->s_fname to bit_mask->bm_fname and set spec->s_fname as used */
-#define DR_STE_SET_MASK_V(lookup_type, bit_mask, bm_fname, spec, s_fname) \
- DR_STE_SET_VAL(lookup_type, bit_mask, bm_fname, spec, s_fname, (spec)->s_fname)
-
-#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
- MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
- MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
-} while (0)
-
-#define DR_STE_SET_MPLS_MASK(lookup_type, mask, in_out, bit_mask) do { \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_MASK_V(lookup_type, mask, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_SET_MPLS_TAG(lookup_type, mask, in_out, tag) do { \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_label, mask, \
- in_out##_first_mpls_label);\
- DR_STE_SET_TAG(lookup_type, tag, mpls0_s_bos, mask, \
- in_out##_first_mpls_s_bos); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_exp, mask, \
- in_out##_first_mpls_exp); \
- DR_STE_SET_TAG(lookup_type, tag, mpls0_ttl, mask, \
- in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
- (_misc)->outer_first_mpls_over_gre_label || \
- (_misc)->outer_first_mpls_over_gre_exp || \
- (_misc)->outer_first_mpls_over_gre_s_bos || \
- (_misc)->outer_first_mpls_over_gre_ttl)
-#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
- (_misc)->outer_first_mpls_over_udp_label || \
- (_misc)->outer_first_mpls_over_udp_exp || \
- (_misc)->outer_first_mpls_over_udp_s_bos || \
- (_misc)->outer_first_mpls_over_udp_ttl)
-
-#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
- ((inner) ? MLX5DR_STE_LU_TYPE_##lookup_type##_I : \
- (rx) ? MLX5DR_STE_LU_TYPE_##lookup_type##_D : \
- MLX5DR_STE_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_tunl_action {
- DR_STE_TUNL_ACTION_NONE = 0,
- DR_STE_TUNL_ACTION_ENABLE = 1,
- DR_STE_TUNL_ACTION_DECAP = 2,
- DR_STE_TUNL_ACTION_L3_DECAP = 3,
- DR_STE_TUNL_ACTION_POP_VLAN = 4,
-};
-
-enum dr_ste_action_type {
- DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
- DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
- DR_STE_ACTION_TYPE_ENCAP = 4,
-};
+#include "dr_ste.h"
struct dr_hw_ste_format {
u8 ctrl[DR_STE_SIZE_CTRL];
@@ -115,6 +18,11 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
return (__force u32)htonl(crc);
}
+bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
+{
+ return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
+}
+
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
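mlx5dr_ste_supp_ttl_cs_recalc() keys a capability off caps->sw_format_ver instead of hard-coding one STE layout, and the same knob is presumably what a per-format context would be chosen by. The selector below is purely illustrative; dr_get_ste_ctx() and mlx5dr_ste_ctx_v0 are hypothetical names, only MLX5_STEERING_FORMAT_CONNECTX_5 and caps->sw_format_ver appear in this diff.

/* Hypothetical sketch of format-based selection; not part of this patch */
static struct mlx5dr_ste_ctx *dr_get_ste_ctx(struct mlx5dr_cmd_caps *caps)
{
	if (caps->sw_format_ver == MLX5_STEERING_FORMAT_CONNECTX_5)
		return &mlx5dr_ste_ctx_v0;	/* hypothetical v0 context */

	return NULL;	/* newer formats would plug in their own context */
}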
@@ -142,7 +50,7 @@ u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
return index;
}
-static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
{
u16 byte_mask = 0;
int i;
@@ -155,7 +63,7 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
-static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
+static u8 *dr_ste_get_tag(u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
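mlx5dr_ste_conv_bit_to_byte_mask(), now exported above, condenses the 16-byte bit mask into a 16-bit byte mask, one bit per mask byte. The loop body is elided from this hunk; the sketch below shows the intended shape under the stated assumption that a byte participates only when it is fully masked.

/* Sketch only: the real loop body is not shown in this hunk. Assumes a
 * 16-byte mask and that a byte counts only when fully set (0xff).
 */
static unsigned short sketch_bit_to_byte_mask(const unsigned char *bit_mask)
{
	unsigned short byte_mask = 0;
	int i;

	for (i = 0; i < 16; i++) {
		byte_mask <<= 1;
		if (bit_mask[i] == 0xff)
			byte_mask |= 1;
	}
	return byte_mask;
}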
@@ -169,104 +77,6 @@ void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
}
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
- DR_STE_ENABLE_FLOW_TAG | flow_tag);
-}
-
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
-{
- /* This can be used for both rx_steering_mult and for sx_transmit */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
-}
-
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
-}
-
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
- bool go_back)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- DR_STE_ACTION_TYPE_PUSH_VLAN);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
- /* Due to HW limitation we need to set this bit, otherwise reformat +
- * push vlan will not work.
- */
- if (go_back)
- mlx5dr_ste_set_go_back_bit(hw_ste_p);
-}
-
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id, int size, bool encap_l3)
-{
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
- encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
- /* The hardware expects here size in words (2 byte) */
- MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
- MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
-}
-
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_DECAP);
-}
-
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_POP_VLAN);
-}
-
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
-{
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
- DR_STE_TUNL_ACTION_L3_DECAP);
- MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
-}
-
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
-}
-
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p)
-{
- return MLX5_GET(ste_general, hw_ste_p, entry_type);
-}
-
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index)
-{
- MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
- num_of_actions);
- MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
- re_write_index);
-}
-
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
-}
-
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type,
- u16 gvmi)
-{
- MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
- MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
- MLX5_SET(ste_general, hw_ste_p, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
-
- /* Set GVMI once, this is the same for RX/TX
- * bits 63_48 of next table base / miss address encode the next GVMI
- */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
-}
-
static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
{
memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
@@ -279,21 +89,26 @@ static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
hw_ste->mask[0] = 0;
}
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste)
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u64 miss_addr)
{
- u64 index =
- (MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_31_6) |
- MLX5_GET(ste_rx_steering_mult, hw_ste, miss_address_39_32) << 26);
-
- return index << 6;
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
}
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size)
+static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste, u64 miss_addr)
{
- u64 index = (icm_addr >> 5) | ht_size;
+ u8 *hw_ste_p = ste->hw_ste;
+
+ ste_ctx->set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+ ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
+ dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+}
- MLX5_SET(ste_general, hw_ste, next_table_base_39_32_size, index >> 27);
- MLX5_SET(ste_general, hw_ste, next_table_base_31_5_size, index);
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+ ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
}
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
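The pattern introduced above repeats throughout the rest of the file: the exported mlx5dr_ste_* helpers become thin wrappers that take the new ste_ctx handle and dispatch through function pointers, with optional hooks (such as prepare_for_postsend later in this file) guarded by a NULL check. A stand-alone sketch of that ops-table shape, using illustrative names rather than the real struct mlx5dr_ste_ctx declared in dr_ste.h, which is not shown in this diff:

#include <stdint.h>

/* Illustrative ops table only; the real one lives in dr_ste.h */
struct sketch_ste_ctx {
	void (*set_miss_addr)(uint8_t *hw_ste_p, uint64_t miss_addr);
	void (*set_hit_addr)(uint8_t *hw_ste_p, uint64_t icm_addr, uint32_t ht_size);
	void (*prepare_for_postsend)(uint8_t *hw_ste_p, uint32_t ste_size);	/* may be NULL */
};

/* Mandatory op: dispatch unconditionally, as mlx5dr_ste_set_miss_addr() does */
static void sketch_set_miss_addr(struct sketch_ste_ctx *ctx,
				 uint8_t *hw_ste_p, uint64_t miss_addr)
{
	ctx->set_miss_addr(hw_ste_p, miss_addr);
}

/* Optional op: guarded by a NULL check, as mlx5dr_ste_prepare_for_postsend() does */
static void sketch_ctx_prepare_for_postsend(struct sketch_ste_ctx *ctx,
					    uint8_t *hw_ste_p, uint32_t ste_size)
{
	if (ctx->prepare_for_postsend)
		ctx->prepare_for_postsend(hw_ste_p, ste_size);
}

A v0 context would presumably fill the first two pointers with the MLX5_SET-based setters this patch removes from dr_ste.c, and could leave the optional hook unset.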
@@ -317,15 +132,16 @@ struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
return &ste->htbl->miss_list[index];
}
-static void dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
u8 *hw_ste = ste->hw_ste;
- MLX5_SET(ste_general, hw_ste, byte_mask, next_htbl->byte_mask);
- MLX5_SET(ste_general, hw_ste, next_lu_type, next_htbl->lu_type);
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
+ ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
dr_ste_set_always_hit((struct dr_hw_ste_format *)ste->hw_ste);
}
@@ -363,7 +179,8 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
/* Free ste which is the head and the only one in miss_list */
static void
-dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
+dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
@@ -380,7 +197,7 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
*/
memcpy(tmp_ste.hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
miss_addr = nic_matcher->e_anchor->chunk->icm_addr;
- mlx5dr_ste_always_miss_addr(&tmp_ste, miss_addr);
+ dr_ste_always_miss_addr(ste_ctx, &tmp_ste, miss_addr);
memcpy(ste->hw_ste, tmp_ste.hw_ste, DR_STE_SIZE_REDUCED);
list_del_init(&ste->miss_list_node);
@@ -399,13 +216,17 @@ dr_ste_remove_head_ste(struct mlx5dr_ste *ste,
* |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
*/
static void
-dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
+dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
+ struct mlx5dr_ste *ste,
+ struct mlx5dr_ste *next_ste,
struct mlx5dr_ste_send_info *ste_info_head,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
{
struct mlx5dr_ste_htbl *next_miss_htbl;
+ u8 hw_ste[DR_STE_SIZE] = {};
+ int sb_idx;
next_miss_htbl = next_ste->htbl;
@@ -418,13 +239,19 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Move data from next into ste */
dr_ste_replace(ste, next_ste);
+ /* Copy all 64 hw_ste bytes */
+ memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED);
+ sb_idx = ste->ste_chain_location - 1;
+ mlx5dr_ste_set_bit_mask(hw_ste,
+ nic_matcher->ste_builder[sb_idx].bit_mask);
+
/* Del the htbl that contains the next_ste.
* The origin htbl stays with the same number of entries.
*/
mlx5dr_htbl_put(next_miss_htbl);
- mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE_REDUCED,
- 0, ste->hw_ste,
+ mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
+ 0, hw_ste,
ste_info_head,
send_ste_list,
true /* Copy data */);
@@ -436,7 +263,8 @@ dr_ste_replace_head_ste(struct mlx5dr_ste *ste, struct mlx5dr_ste *next_ste,
/* Free ste that is located in the middle of the miss list:
* |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
*/
-static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
+static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste *ste,
struct mlx5dr_ste_send_info *ste_info,
struct list_head *send_ste_list,
struct mlx5dr_ste_htbl *stats_tbl)
@@ -448,10 +276,10 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
if (WARN_ON(!prev_ste))
return;
- miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
- mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
+ miss_addr = ste_ctx->get_miss_addr(ste->hw_ste);
+ ste_ctx->set_miss_addr(prev_ste->hw_ste, miss_addr);
- mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_REDUCED, 0,
+ mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
prev_ste->hw_ste, ste_info,
send_ste_list, true /* Copy data*/);
@@ -467,6 +295,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
{
struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_send_info ste_info_head;
struct mlx5dr_ste *next_ste, *first_ste;
bool put_on_origin_table = true;
@@ -495,18 +324,22 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
if (!next_ste) {
/* One and only entry in the list */
- dr_ste_remove_head_ste(ste, nic_matcher,
+ dr_ste_remove_head_ste(ste_ctx, ste,
+ nic_matcher,
&ste_info_head,
&send_ste_list,
stats_tbl);
} else {
/* First but not only entry in the list */
- dr_ste_replace_head_ste(ste, next_ste, &ste_info_head,
+ dr_ste_replace_head_ste(nic_matcher, ste,
+ next_ste, &ste_info_head,
&send_ste_list, stats_tbl);
put_on_origin_table = false;
}
} else { /* Ste in the middle of the list */
- dr_ste_remove_middle_ste(ste, &ste_info_head, &send_ste_list, stats_tbl);
+ dr_ste_remove_middle_ste(ste_ctx, ste,
+ &ste_info_head, &send_ste_list,
+ stats_tbl);
}
/* Update HW */
@@ -530,34 +363,25 @@ bool mlx5dr_ste_equal_tag(void *src, void *dst)
return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl)
{
struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
- mlx5dr_ste_set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
+ ste_ctx->set_hit_addr(hw_ste, chunk->icm_addr, chunk->num_of_entries);
}
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u32 ste_size)
{
- u64 index = miss_addr >> 6;
-
- /* Miss address for TX and RX STEs located in the same offsets */
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
- MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
-}
-
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
-{
- u8 *hw_ste = ste->hw_ste;
-
- MLX5_SET(ste_rx_steering_mult, hw_ste, next_lu_type, MLX5DR_STE_LU_TYPE_DONT_CARE);
- mlx5dr_ste_set_miss_addr(hw_ste, miss_addr);
- dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
+ if (ste_ctx->prepare_for_postsend)
+ ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
}
/* Init one ste as a pattern for ste data array */
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
@@ -565,13 +389,13 @@ void mlx5dr_ste_set_formatted_ste(u16 gvmi,
{
struct mlx5dr_ste ste = {};
- mlx5dr_ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
+ ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi);
ste.hw_ste = formatted_ste;
if (connect_info->type == CONNECT_HIT)
- dr_ste_always_hit_htbl(&ste, connect_info->hit_next_htbl);
+ dr_ste_always_hit_htbl(ste_ctx, &ste, connect_info->hit_next_htbl);
else
- mlx5dr_ste_always_miss_addr(&ste, connect_info->miss_icm_addr);
+ dr_ste_always_miss_addr(ste_ctx, &ste, connect_info->miss_icm_addr);
}
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
@@ -582,7 +406,8 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
{
u8 formatted_ste[DR_STE_SIZE] = {};
- mlx5dr_ste_set_formatted_ste(dmn->info.caps.gvmi,
+ mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+ dmn->info.caps.gvmi,
nic_dmn,
htbl,
formatted_ste,
@@ -597,18 +422,18 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
u8 *cur_hw_ste,
enum mlx5dr_icm_chunk_size log_table_size)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)cur_hw_ste;
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_htbl_connect_info info;
struct mlx5dr_ste_htbl *next_htbl;
if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
- u8 next_lu_type;
+ u16 next_lu_type;
u16 byte_mask;
- next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
- byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
+ next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
+ byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);
next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
log_table_size,
@@ -628,7 +453,8 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
goto free_table;
}
- mlx5dr_ste_set_hit_addr_by_next_htbl(cur_hw_ste, next_htbl);
+ mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
+ cur_hw_ste, next_htbl);
ste->next_htbl = next_htbl;
next_htbl->pointing_ste = ste;
}
@@ -657,7 +483,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask)
+ u16 lu_type, u16 byte_mask)
{
struct mlx5dr_icm_chunk *chunk;
struct mlx5dr_ste_htbl *htbl;
@@ -709,6 +535,92 @@ int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
return 0;
}
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_tx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ ste_ctx->set_actions_rx(dmn, action_type_set, hw_ste_arr,
+ attr, added_stes);
+}
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
+{
+ const struct mlx5dr_ste_action_modify_field *hw_field;
+
+ if (sw_field >= ste_ctx->modify_field_arr_sz)
+ return NULL;
+
+ hw_field = &ste_ctx->modify_field_arr[sw_field];
+ if (!hw_field->end && !hw_field->start)
+ return NULL;
+
+ return hw_field;
+}
+
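mlx5dr_ste_conv_modify_hdr_sw_field() above returns NULL both for out-of-range software fields and for table slots whose start and end are zero, so a caller can treat "unknown field" and "field not mapped by this format" the same way. A hedged usage sketch with a hypothetical caller name:

/* Hypothetical caller, for illustration only; not part of this patch */
static int sketch_check_modify_hdr_field(struct mlx5dr_ste_ctx *ste_ctx,
					 u16 sw_field)
{
	const struct mlx5dr_ste_action_modify_field *hw_field;

	hw_field = mlx5dr_ste_conv_modify_hdr_sw_field(ste_ctx, sw_field);
	if (!hw_field)
		return -EOPNOTSUPP;	/* out of range, or not mapped by this format */

	return 0;
}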
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_set((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ ste_ctx->set_action_add((u8 *)hw_action,
+ hw_field, shifter, length, data);
+}
+
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ ste_ctx->set_action_copy((u8 *)hw_action,
+ dst_hw_field, dst_shifter, dst_len,
+ src_hw_field, src_shifter);
+}
+
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ /* Only Ethernet frames are supported, with VLAN (18) or without (14) */
+ if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
+ return -EINVAL;
+
+ return ste_ctx->set_action_decap_l3_list(data, data_sz,
+ hw_action, hw_action_sz,
+ used_hw_action_num);
+}
+
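The size check in mlx5dr_ste_set_action_decap_l3_list() encodes plain Ethernet framing: 14 bytes for destination MAC, source MAC and ethertype, and 18 bytes when a single 802.1Q tag is present, matching the HDR_LEN_L2 / HDR_LEN_L2_W_VLAN names. A small self-contained check of those two sizes (the SKETCH_* macros are defined locally and only mirror the values quoted in the comment above):

#include <assert.h>

#define SKETCH_ETH_ALEN			6	/* MAC address length */
#define SKETCH_ETH_TYPE_LEN		2	/* ethertype */
#define SKETCH_VLAN_HLEN		4	/* one 802.1Q tag */

#define SKETCH_HDR_LEN_L2		(2 * SKETCH_ETH_ALEN + SKETCH_ETH_TYPE_LEN)	/* 14 */
#define SKETCH_HDR_LEN_L2_W_VLAN	(SKETCH_HDR_LEN_L2 + SKETCH_VLAN_HLEN)		/* 18 */

int main(void)
{
	assert(SKETCH_HDR_LEN_L2 == 14);
	assert(SKETCH_HDR_LEN_L2_W_VLAN == 18);
	return 0;
}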
int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
u8 match_criteria,
struct mlx5dr_match_param *mask,
@@ -738,6 +650,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
{
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+ struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
struct mlx5dr_ste_build *sb;
int ret, i;
@@ -748,14 +661,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
sb = nic_matcher->ste_builder;
for (i = 0; i < nic_matcher->num_of_builders; i++) {
- mlx5dr_ste_init(ste_arr,
- sb->lu_type,
- nic_dmn->ste_type,
- dmn->info.caps.gvmi);
+ ste_ctx->ste_init(ste_arr,
+ sb->lu_type,
+ nic_dmn->ste_type,
+ dmn->info.caps.gvmi);
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
- ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
+ ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@@ -765,45 +678,14 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
* not relevant for the last ste in the chain.
*/
sb++;
- MLX5_SET(ste_general, ste_arr, next_lu_type, sb->lu_type);
- MLX5_SET(ste_general, ste_arr, byte_mask, sb->byte_mask);
+ ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
+ ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
}
ste_arr += DR_STE_SIZE;
}
return 0;
}
-static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- if (mask->smac_47_16 || mask->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
- mask->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
- mask->smac_47_16 << 16 | mask->smac_15_0);
- mask->smac_47_16 = 0;
- mask->smac_15_0 = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
-
- if (mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- } else if (mask->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
- mask->svlan_tag = 0;
- }
-}
-
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
{
spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
@@ -1045,566 +927,93 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
}
}
-static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- if (spec->smac_47_16 || spec->smac_15_0) {
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
- spec->smac_47_16 >> 16);
- MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
- spec->smac_47_16 << 16 | spec->smac_15_0);
- spec->smac_47_16 = 0;
- spec->smac_15_0 = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_127_96, mask, dst_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_95_64, mask, dst_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_63_32, mask, dst_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_dst, bit_mask, dst_ip_31_0, mask, dst_ip_31_0);
+ ste_ctx->build_eth_l2_src_dst_init(sb, mask);
}
-static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_dst_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_127_96, mask, src_ip_127_96);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_95_64, mask, src_ip_95_64);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_63_32, mask, src_ip_63_32);
- DR_STE_SET_MASK_V(eth_l3_ipv6_src, bit_mask, src_ip_31_0, mask, src_ip_31_0);
-}
-
-static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
- DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv6_src_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv6_src_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param *value,
- bool inner,
- u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_address, mask, dst_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_address, mask, src_ip_31_0);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- destination_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- source_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l3_ipv4_5_tuple, bit_mask,
- ecn, mask, ip_ecn);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, bit_mask, mask);
- mask->tcp_flags = 0;
- }
-}
-
-static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
+ ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_5_tuple_tag;
-}
-
-static void
-dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_src, bit_mask, l3_type, mask, ip_version);
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_mask->inner_second_cvlan_tag ||
- misc_mask->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->inner_second_cvlan_tag = 0;
- misc_mask->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, inner_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, inner_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, inner_second_prio);
- } else {
- if (misc_mask->outer_second_cvlan_tag ||
- misc_mask->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
- misc_mask->outer_second_cvlan_tag = 0;
- misc_mask->outer_second_svlan_tag = 0;
- }
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_vlan_id, misc_mask, outer_second_vid);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_cfi, misc_mask, outer_second_cfi);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask,
- second_priority, misc_mask, outer_second_prio);
- }
+ ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
}
-static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
- bool inner, u8 *tag)
-{
- struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc_spec = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- pr_info("Unsupported ip_version value\n");
- return -EINVAL;
- }
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (inner) {
- if (misc_spec->inner_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->inner_second_cvlan_tag = 0;
- } else if (misc_spec->inner_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->inner_second_svlan_tag = 0;
- }
-
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
- } else {
- if (misc_spec->outer_second_cvlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
- misc_spec->outer_second_cvlan_tag = 0;
- } else if (misc_spec->outer_second_svlan_tag) {
- MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
- misc_spec->outer_second_svlan_tag = 0;
- }
- DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
- DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
- DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
- }
-
- return 0;
-}
-
-static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
- DR_STE_SET_MASK_V(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
- DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
-}
-
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_src_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_tag;
-}
-
-static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
- dr_ste_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
-
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+ ste_ctx->build_eth_l2_src_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l2_dst_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_dst_tag;
-}
-
-static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
- DR_STE_SET_MASK(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask,
- l2_tunneling_network_id, (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (mask->svlan_tag || mask->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
- mask->cvlan_tag = 0;
- mask->svlan_tag = 0;
- }
+ ste_ctx->build_eth_l2_dst_init(sb, mask);
}
-static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
- DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
- DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
- DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
-
- if (misc->vxlan_vni) {
- MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
- (misc->vxlan_vni << 8));
- misc->vxlan_vni = 0;
- }
-
- if (spec->cvlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
- spec->cvlan_tag = 0;
- } else if (spec->svlan_tag) {
- MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
- spec->svlan_tag = 0;
- }
-
- if (spec->ip_version) {
- if (spec->ip_version == IP_VERSION_IPV4) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
- spec->ip_version = 0;
- } else if (spec->ip_version == IP_VERSION_IPV6) {
- MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
- spec->ip_version = 0;
- } else {
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask, bool inner, bool rx)
{
- dr_ste_build_eth_l2_tnl_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l2_tnl_tag;
-}
-
-static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l3_ipv4_misc, bit_mask, time_to_live, mask, ttl_hoplimit);
+ ste_ctx->build_eth_l2_tnl_init(sb, mask);
}
-static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l3_ipv4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l3_ipv4_misc_tag;
-}
-
-static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, tcp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, tcp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dst_port, mask, udp_dport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, src_port, mask, udp_sport);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, protocol, mask, ip_protocol);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, fragmented, mask, frag);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, dscp, mask, ip_dscp);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ecn, mask, ip_ecn);
- DR_STE_SET_MASK_V(eth_l4, bit_mask, ipv6_hop_limit, mask, ttl_hoplimit);
-
- if (mask->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, bit_mask, mask);
- mask->tcp_flags = 0;
- }
+ ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
}
-static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
- DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
- DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
- DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
- DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
- DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
- DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
- DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
-
- if (spec->tcp_flags) {
- DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
- spec->tcp_flags = 0;
- }
-
- return 0;
-}
-
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_ipv6_l3_l4_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_ipv6_l3_l4_tag;
+ ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
}
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
@@ -1622,653 +1031,110 @@ void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
}
-static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (inner)
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, inner, bit_mask);
- else
- DR_STE_SET_MPLS_MASK(mpls, misc2_mask, outer, bit_mask);
-}
-
-static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
-
- if (sb->inner)
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
- else
- DR_STE_SET_MPLS_TAG(mpls, misc2_mask, outer, tag);
-
- return 0;
-}
-
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_mpls_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_mpls_tag;
-}
-
-static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_protocol, misc_mask, gre_protocol);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_k_present, misc_mask, gre_k_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_h, misc_mask, gre_key_h);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_key_l, misc_mask, gre_key_l);
-
- DR_STE_SET_MASK_V(gre, bit_mask, gre_c_present, misc_mask, gre_c_present);
- DR_STE_SET_MASK_V(gre, bit_mask, gre_s_present, misc_mask, gre_s_present);
+ ste_ctx->build_mpls_init(sb, mask);
}
-static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
-
- DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
- DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
- DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
-
- DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
-
- DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask, bool inner, bool rx)
-{
- dr_ste_build_gre_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GRE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_gre_tag;
-}
-
-static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_MASK_V(flex_parser_0, bit_mask, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
-}
-
-static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_gre_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_gre_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_gre_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_gre_ttl);
- } else {
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
- misc_2_mask, outer_first_mpls_over_udp_label);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
- misc_2_mask, outer_first_mpls_over_udp_exp);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
- misc_2_mask, outer_first_mpls_over_udp_s_bos);
-
- DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
- misc_2_mask, outer_first_mpls_over_udp_ttl);
- }
- return 0;
+ ste_ctx->build_tnl_gre_init(sb, mask);
}
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_0_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_0_tag;
-}
-
-#define ICMP_TYPE_OFFSET_FIRST_DW 24
-#define ICMP_CODE_OFFSET_FIRST_DW 16
-#define ICMP_HEADER_DATA_OFFSET_SECOND_DW 0
-
-static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
- struct mlx5dr_cmd_caps *caps,
- u8 *bit_mask)
-{
- bool is_ipv4_mask = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
- struct mlx5dr_match_misc3 *misc_3_mask = &mask->misc3;
- u32 icmp_header_data_mask;
- u32 icmp_type_mask;
- u32 icmp_code_mask;
- int dw0_location;
- int dw1_location;
-
- if (is_ipv4_mask) {
- icmp_header_data_mask = misc_3_mask->icmpv4_header_data;
- icmp_type_mask = misc_3_mask->icmpv4_type;
- icmp_code_mask = misc_3_mask->icmpv4_code;
- dw0_location = caps->flex_parser_id_icmp_dw0;
- dw1_location = caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data_mask = misc_3_mask->icmpv6_header_data;
- icmp_type_mask = misc_3_mask->icmpv6_type;
- icmp_code_mask = misc_3_mask->icmpv6_code;
- dw0_location = caps->flex_parser_id_icmpv6_dw0;
- dw1_location = caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- (icmp_type_mask << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_type = 0;
- else
- misc_3_mask->icmpv6_type = 0;
- }
- if (icmp_code_mask) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, bit_mask,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_4,
- cur_val | (icmp_code_mask << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_code = 0;
- else
- misc_3_mask->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data_mask) {
- MLX5_SET(ste_flex_parser_1, bit_mask, flex_parser_5,
- (icmp_header_data_mask << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4_mask)
- misc_3_mask->icmpv4_header_data = 0;
- else
- misc_3_mask->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
- u32 icmp_header_data;
- int dw0_location;
- int dw1_location;
- u32 icmp_type;
- u32 icmp_code;
- bool is_ipv4;
-
- is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
- if (is_ipv4) {
- icmp_header_data = misc_3->icmpv4_header_data;
- icmp_type = misc_3->icmpv4_type;
- icmp_code = misc_3->icmpv4_code;
- dw0_location = sb->caps->flex_parser_id_icmp_dw0;
- dw1_location = sb->caps->flex_parser_id_icmp_dw1;
- } else {
- icmp_header_data = misc_3->icmpv6_header_data;
- icmp_type = misc_3->icmpv6_type;
- icmp_code = misc_3->icmpv6_code;
- dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
- dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
- }
-
- switch (dw0_location) {
- case 4:
- if (icmp_type) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- (icmp_type << ICMP_TYPE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_type = 0;
- else
- misc_3->icmpv6_type = 0;
- }
-
- if (icmp_code) {
- u32 cur_val = MLX5_GET(ste_flex_parser_1, tag,
- flex_parser_4);
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
- cur_val | (icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
- if (is_ipv4)
- misc_3->icmpv4_code = 0;
- else
- misc_3->icmpv6_code = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- switch (dw1_location) {
- case 5:
- if (icmp_header_data) {
- MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
- (icmp_header_data << ICMP_HEADER_DATA_OFFSET_SECOND_DW));
- if (is_ipv4)
- misc_3->icmpv4_header_data = 0;
- else
- misc_3->icmpv6_header_data = 0;
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
+ ste_ctx->build_tnl_mpls_init(sb, mask);
}
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx)
{
- int ret;
-
- ret = dr_ste_build_flex_parser_1_bit_mask(mask, caps, sb->bit_mask);
- if (ret)
- return ret;
-
sb->rx = rx;
sb->inner = inner;
sb->caps = caps;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_1_tag;
-
- return 0;
-}
-
-static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(general_purpose, bit_mask,
- general_purpose_lookup_field, misc_2_mask,
- metadata_reg_a);
-}
-
-static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
- misc_2_mask, metadata_reg_a);
-
- return 0;
+ return ste_ctx->build_icmp_init(sb, mask);
}
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_general_purpose_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_general_purpose_tag;
-}
-
-static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- if (inner) {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- inner_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- inner_tcp_ack_num);
- } else {
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, seq_num, misc_3_mask,
- outer_tcp_seq_num);
- DR_STE_SET_MASK_V(eth_l4_misc, bit_mask, ack_num, misc_3_mask,
- outer_tcp_ack_num);
- }
-}
-
-static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- if (sb->inner) {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
- } else {
- DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
- DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
- }
-
- return 0;
+ ste_ctx->build_general_purpose_init(sb, mask);
}
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_eth_l4_misc_bit_mask(mask, inner, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, rx, inner);
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_eth_l4_misc_tag;
+ ste_ctx->build_eth_l4_misc_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
-{
- struct mlx5dr_match_misc3 *misc_3_mask = &value->misc3;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_flags,
- misc_3_mask, outer_vxlan_gpe_flags);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_next_protocol,
- misc_3_mask, outer_vxlan_gpe_next_protocol);
- DR_STE_SET_MASK_V(flex_parser_tnl_vxlan_gpe, bit_mask,
- outer_vxlan_gpe_vni,
- misc_3_mask, outer_vxlan_gpe_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_flags, misc3,
- outer_vxlan_gpe_flags);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_next_protocol, misc3,
- outer_vxlan_gpe_next_protocol);
- DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
- outer_vxlan_gpe_vni, misc3,
- outer_vxlan_gpe_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(mask, inner,
- sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_vxlan_gpe_tag;
+ ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
}
-static void
-dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_protocol_type,
- misc_mask, geneve_protocol_type);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_oam,
- misc_mask, geneve_oam);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_opt_len,
- misc_mask, geneve_opt_len);
- DR_STE_SET_MASK_V(flex_parser_tnl_geneve, bit_mask,
- geneve_vni,
- misc_mask, geneve_vni);
-}
-
-static int
-dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
-
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_protocol_type, misc, geneve_protocol_type);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_oam, misc, geneve_oam);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_opt_len, misc, geneve_opt_len);
- DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
- geneve_vni, misc, geneve_vni);
-
- return 0;
-}
-
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_flex_parser_tnl_geneve_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_flex_parser_tnl_geneve_tag;
+ ste_ctx->build_tnl_geneve_init(sb, mask);
}
-static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_h,
- misc_2_mask, metadata_reg_c_0);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_0_l,
- misc_2_mask, metadata_reg_c_1);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_h,
- misc_2_mask, metadata_reg_c_2);
- DR_STE_SET_MASK_V(register_0, bit_mask, register_1_l,
- misc_2_mask, metadata_reg_c_3);
-}
-
-static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
- DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
- DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
- DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_0_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_0_tag;
+ ste_ctx->build_register_0_init(sb, mask);
}
-static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
-
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_h,
- misc_2_mask, metadata_reg_c_4);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_2_l,
- misc_2_mask, metadata_reg_c_5);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_h,
- misc_2_mask, metadata_reg_c_6);
- DR_STE_SET_MASK_V(register_1, bit_mask, register_3_l,
- misc_2_mask, metadata_reg_c_7);
-}
-
-static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
- DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
- DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
- DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
- DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
-
- return 0;
-}
-
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
- dr_ste_build_register_1_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
+ ste_ctx->build_register_1_init(sb, mask);
}
-static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
-{
- struct mlx5dr_match_misc *misc_mask = &value->misc;
-
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
- DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
- misc_mask->source_eswitch_owner_vhca_id = 0;
-}
-
-static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
- struct mlx5dr_ste_build *sb,
- u8 *tag)
-{
- struct mlx5dr_match_misc *misc = &value->misc;
- struct mlx5dr_cmd_vport_cap *vport_cap;
- struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
- u8 *bit_mask = sb->bit_mask;
- bool source_gvmi_set;
-
- DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
-
- if (sb->vhca_id_valid) {
- /* Find port GVMI based on the eswitch_owner_vhca_id */
- if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
- else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
- dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
- else
- return -EINVAL;
- } else {
- caps = &dmn->info.caps;
- }
-
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
- if (!vport_cap)
- return -EINVAL;
-
- source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
- if (vport_cap->vport_gvmi && source_gvmi_set)
- MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
-
- misc->source_eswitch_owner_vhca_id = 0;
- misc->source_port = 0;
-
- return 0;
-}
-
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx)
@@ -2276,12 +1142,21 @@ void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
- dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
-
sb->rx = rx;
sb->dmn = dmn;
sb->inner = inner;
- sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
- sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
- sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
+ ste_ctx->build_src_gvmi_qpn_init(sb, mask);
+}
+
+static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
+ [MLX5_STEERING_FORMAT_CONNECTX_5] = &ste_ctx_v0,
+ [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
+{
+ if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
+ return NULL;
+
+ return mlx5dr_ste_ctx_arr[version];
}
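
Not part of the patch: a minimal sketch of how a caller could pick the per-device STE context with the new dispatcher. Only mlx5dr_ste_get_ctx() and the MLX5_STEERING_FORMAT_* values come from this diff; the surrounding init function, the dmn->ste_ctx member and the way the version is obtained are assumptions for illustration.

/* Illustration only, not part of the patch. */
static int example_domain_init_ste_ctx(struct mlx5dr_domain *dmn, u8 version)
{
	/* Pick v0 (ConnectX-5 format) or v1 (ConnectX-6 Dx format) ops table */
	dmn->ste_ctx = mlx5dr_ste_get_ctx(version);
	if (!dmn->ste_ctx)	/* unknown steering format version */
		return -EOPNOTSUPP;
	return 0;
}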
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
new file mode 100644
index 000000000000..06bcb0ee8f96
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef _DR_STE_
+#define _DR_STE_
+
+#include "dr_types.h"
+
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_SPI 0x3
+#define IP_VERSION_IPV4 0x4
+#define IP_VERSION_IPV6 0x6
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+#define HDR_LEN_L2_MACS 0xC
+#define HDR_LEN_L2_VLAN 0x4
+#define HDR_LEN_L2_ETHER 0x2
+#define HDR_LEN_L2 (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
+#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
+
+/* Set a specific value into the STE tag and mark the spec field as consumed */
+#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
+ if ((spec)->s_fname) { \
+ MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
+ (spec)->s_fname = 0; \
+ } \
+} while (0)
+
+/* Copy spec->s_fname into tag->t_fname and mark spec->s_fname as used */
+#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
+
+/* Set tag->t_fname to all ones (-1) and mark spec->s_fname as used */
+#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
+ DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
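
For readability, this is roughly what one invocation of the macro family above expands to, written out by hand from DR_STE_SET_VAL; the eth_l4/tcp_dport names are just an example taken from a builder later in this series, nothing new is declared here.

/* DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport) expands (roughly) to: */
do {
	if (spec->tcp_dport) {
		MLX5_SET(ste_eth_l4, tag, dst_port, spec->tcp_dport);
		spec->tcp_dport = 0;	/* mark the spec field as consumed */
	}
} while (0);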
+
+#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
+ MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
+} while (0)
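
The bit positions consumed by DR_STE_SET_TCP_FLAGS follow the usual TCP flag ordering; the summary below is derived directly from the macro and adds no new definitions.

/* tcp_flags bit layout assumed by DR_STE_SET_TCP_FLAGS:
 * bit 0: FIN, bit 1: SYN, bit 2: RST, bit 3: PSH, bit 4: ACK,
 * bit 5: URG, bit 6: ECE, bit 7: CWR, bit 8: NS.
 * e.g. a spec with tcp_flags == 0x12 (SYN | ACK) sets tcp_syn and tcp_ack.
 */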
+
+#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
+ struct mlx5dr_match_misc2 *_mask = mask; \
+ u8 *_tag = tag; \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
+ in_out##_first_mpls_label);\
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
+ in_out##_first_mpls_s_bos); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
+ in_out##_first_mpls_exp); \
+ DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
+ in_out##_first_mpls_ttl); \
+} while (0)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_gre_label || \
+ (_misc)->outer_first_mpls_over_gre_exp || \
+ (_misc)->outer_first_mpls_over_gre_s_bos || \
+ (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+ (_misc)->outer_first_mpls_over_udp_label || \
+ (_misc)->outer_first_mpls_over_udp_exp || \
+ (_misc)->outer_first_mpls_over_udp_s_bos || \
+ (_misc)->outer_first_mpls_over_udp_ttl)
+
+enum dr_ste_action_modify_type_l3 {
+ DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
+};
+
+enum dr_ste_action_modify_type_l4 {
+ DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
+ DR_STE_ACTION_MDFY_TYPE_L4_TCP = 0x1,
+ DR_STE_ACTION_MDFY_TYPE_L4_UDP = 0x2,
+};
+
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
+
+#define DR_STE_CTX_BUILDER(fname) \
+ ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
+ struct mlx5dr_match_param *mask))
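
To make the builder table below easier to read, this is what a single use of the helper macro expands to inside struct mlx5dr_ste_ctx (hand expansion only, no new declarations):

/* "void DR_STE_CTX_BUILDER(mpls);" expands to: */
void (*build_mpls_init)(struct mlx5dr_ste_build *sb,
			struct mlx5dr_match_param *mask);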
+
+struct mlx5dr_ste_ctx {
+ /* Builders */
+ void DR_STE_CTX_BUILDER(eth_l2_src_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
+ void DR_STE_CTX_BUILDER(eth_l2_src);
+ void DR_STE_CTX_BUILDER(eth_l2_dst);
+ void DR_STE_CTX_BUILDER(eth_l2_tnl);
+ void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
+ void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
+ void DR_STE_CTX_BUILDER(mpls);
+ void DR_STE_CTX_BUILDER(tnl_gre);
+ void DR_STE_CTX_BUILDER(tnl_mpls);
+ int DR_STE_CTX_BUILDER(icmp);
+ void DR_STE_CTX_BUILDER(general_purpose);
+ void DR_STE_CTX_BUILDER(eth_l4_misc);
+ void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
+ void DR_STE_CTX_BUILDER(tnl_geneve);
+ void DR_STE_CTX_BUILDER(register_0);
+ void DR_STE_CTX_BUILDER(register_1);
+ void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+
+ /* Getters and Setters */
+ void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi);
+ void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
+ u16 (*get_next_lu_type)(u8 *hw_ste_p);
+ void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
+ u64 (*get_miss_addr)(u8 *hw_ste_p);
+ void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+ void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
+ u16 (*get_byte_mask)(u8 *hw_ste_p);
+
+ /* Actions */
+ void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *hw_ste_arr,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+ u32 modify_field_arr_sz;
+ const struct mlx5dr_ste_action_modify_field *modify_field_arr;
+ void (*set_action_set)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_add)(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+ void (*set_action_copy)(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+ int (*set_action_decap_l3_list)(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+
+ /* Send */
+ void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
+};
+
+extern struct mlx5dr_ste_ctx ste_ctx_v0;
+extern struct mlx5dr_ste_ctx ste_ctx_v1;
+
+#endif /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
new file mode 100644
index 000000000000..9ec079247c4b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -0,0 +1,1640 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include "dr_ste.h"
+
+#define SVLAN_ETHERTYPE 0x88a8
+#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+
+enum dr_ste_v0_action_tunl {
+ DR_STE_TUNL_ACTION_NONE = 0,
+ DR_STE_TUNL_ACTION_ENABLE = 1,
+ DR_STE_TUNL_ACTION_DECAP = 2,
+ DR_STE_TUNL_ACTION_L3_DECAP = 3,
+ DR_STE_TUNL_ACTION_POP_VLAN = 4,
+};
+
+enum dr_ste_v0_action_type {
+ DR_STE_ACTION_TYPE_PUSH_VLAN = 1,
+ DR_STE_ACTION_TYPE_ENCAP_L3 = 3,
+ DR_STE_ACTION_TYPE_ENCAP = 4,
+};
+
+enum dr_ste_v0_action_mdfy_op {
+ DR_STE_ACTION_MDFY_OP_COPY = 0x1,
+ DR_STE_ACTION_MDFY_OP_SET = 0x2,
+ DR_STE_ACTION_MDFY_OP_ADD = 0x3,
+};
+
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+ ((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
+ (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
+ DR_STE_V0_LU_TYPE_##lookup_type##_O)
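
A quick worked example of the lookup-type selection above, using values from the enum that follows:

/* DR_STE_CALC_LU_TYPE(ETHL2_SRC, rx, inner) picks the variant by direction:
 *   inner          -> DR_STE_V0_LU_TYPE_ETHL2_SRC_I (0x09)
 *   !inner && rx   -> DR_STE_V0_LU_TYPE_ETHL2_SRC_D (0x1c)
 *   !inner && !rx  -> DR_STE_V0_LU_TYPE_ETHL2_SRC_O (0x08)
 */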
+
+enum {
+ DR_STE_V0_LU_TYPE_NOP = 0x00,
+ DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
+ DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_O = 0x06,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_I = 0x07,
+ DR_STE_V0_LU_TYPE_ETHL2_DST_D = 0x1b,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_O = 0x08,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_I = 0x09,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_D = 0x1c,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
+ DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
+ DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
+ DR_STE_V0_LU_TYPE_ETHL4_O = 0x13,
+ DR_STE_V0_LU_TYPE_ETHL4_I = 0x14,
+ DR_STE_V0_LU_TYPE_ETHL4_D = 0x21,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_O = 0x2c,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_I = 0x2d,
+ DR_STE_V0_LU_TYPE_ETHL4_MISC_D = 0x2e,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_O = 0x15,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_I = 0x24,
+ DR_STE_V0_LU_TYPE_MPLS_FIRST_D = 0x25,
+ DR_STE_V0_LU_TYPE_GRE = 0x16,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_0 = 0x22,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_1 = 0x23,
+ DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
+ DR_STE_V0_LU_TYPE_GENERAL_PURPOSE = 0x18,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
+ DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
+ DR_STE_V0_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+enum {
+ DR_STE_V0_ACTION_MDFY_FLD_L2_0 = 0,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_1 = 1,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_2 = 2,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_0 = 3,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_1 = 4,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_2 = 5,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_3 = 6,
+ DR_STE_V0_ACTION_MDFY_FLD_L3_4 = 7,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_0 = 8,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_1 = 9,
+ DR_STE_V0_ACTION_MDFY_FLD_MPLS = 10,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0 = 11,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_0 = 12,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_1 = 13,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_2 = 14,
+ DR_STE_V0_ACTION_MDFY_FLD_REG_3 = 15,
+ DR_STE_V0_ACTION_MDFY_FLD_L4_2 = 16,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_0 = 17,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_1 = 18,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_2 = 19,
+ DR_STE_V0_ACTION_MDFY_FLD_FLEX_3 = 20,
+ DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1 = 21,
+ DR_STE_V0_ACTION_MDFY_FLD_METADATA = 22,
+ DR_STE_V0_ACTION_MDFY_FLD_RESERVED = 23,
+};
+
+static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
+ },
+};
+
+static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
+static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+	/* The miss address for TX and RX STEs is located at the same offsets */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
+
+static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
+{
+ u64 index =
+ ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
+ ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);
+
+ return index << 6;
+}
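
Not part of the patch: a small standalone sketch of the miss-address encoding used by the two helpers above. The address is 64-byte aligned, the low 26 bits of the shifted index go into miss_address_31_6 and the next bits into miss_address_39_32; the mask widths below are inferred from the field names.

/* Standalone illustration (plain C, no MLX5_SET) of the encode/decode round trip. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t miss_addr = 0x123456780ULL & ~0x3fULL;	/* 64-byte aligned */
	uint64_t index = miss_addr >> 6;
	uint32_t addr_31_6  = index & 0x3ffffff;	/* 26 bits */
	uint32_t addr_39_32 = (index >> 26) & 0xff;	/* width inferred from name */
	uint64_t decoded = ((uint64_t)addr_31_6 | ((uint64_t)addr_39_32 << 26)) << 6;

	assert(decoded == miss_addr);
	return 0;
}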
+
+static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+ MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
+}
+
+static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, byte_mask);
+}
+
+static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+}
+
+static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
+}
+
+static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
+}
+
+static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
+}
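
Similarly, a standalone sketch of the hit-address packing in dr_ste_v0_set_hit_addr(): the ICM address is 32-byte aligned, the hash-table size is OR-ed into the low bits, and the combined index is split into a 27-bit low part (next_table_base_31_5_size) and the remainder (next_table_base_39_32_size). Field widths are inferred from the field names; this is only an illustration of the arithmetic.

/* Standalone illustration of the (icm_addr >> 5) | ht_size split. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t icm_addr = 0x2000040ULL;	/* 32-byte aligned example address */
	uint32_t ht_size = 1;			/* e.g. 1, as used for the final hit below */
	uint64_t index = (icm_addr >> 5) | ht_size;
	uint32_t base_31_5_size  = index & 0x7ffffff;	/* low 27 bits */
	uint32_t base_39_32_size = index >> 27;		/* remaining high bits */

	printf("low=0x%x high=0x%x\n", base_31_5_size, base_39_32_size);
	return 0;
}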
+
+static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi)
+{
+ dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
+ dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
+ dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+	/* Set the GVMI once; it is the same for RX and TX.
+	 * Bits 63_48 of the next table base / miss address encode the next GVMI.
+	 */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+ DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+	/* This can be used for both rx_steering_mult and sx_transmit */
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+ bool go_back)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ DR_STE_ACTION_TYPE_PUSH_VLAN);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+	/* Due to a HW limitation we need to set this bit, otherwise reformat +
+	 * push vlan will not work.
+	 */
+ if (go_back)
+ dr_ste_v0_set_go_back_bit(hw_ste_p);
+}
+
+static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
+ int size, bool encap_l3)
+{
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+ encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+	/* The hardware expects the size here in 2-byte words */
+ MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+ MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_DECAP);
+}
+
+static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+ MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+ DR_STE_TUNL_ACTION_L3_DECAP);
+ MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+}
+
+static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+ num_of_actions);
+ MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+ re_write_index);
+}
+
+static void dr_ste_v0_arr_init_next(u8 **last_ste,
+ u32 *added_stes,
+ enum mlx5dr_ste_entry_type entry_type,
+ u16 gvmi)
+{
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
+ entry_type, gvmi);
+}
+
+static void
+dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
+
+	/* Make sure the modify header action comes before L2
+	 * encapsulation, since modify headers are supported
+	 * for outer headers only.
+	 */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_push_vlan(last_ste,
+ attr->vlans.headers[i],
+ encap);
+ }
+ }
+
+ if (encap) {
+		/* Modify header and encapsulation require different STEs,
+		 * since the modify header STE format doesn't support the
+		 * encapsulation tunneling_action.
+		 */
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
+ action_type_set[DR_ACTION_TYP_PUSH_VLAN])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_TX,
+ attr->gvmi);
+
+ dr_ste_v0_set_tx_encap(last_ste,
+ attr->reformat_id,
+ attr->reformat_size,
+ action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
+		/* Whenever prio_tag_required is enabled, we can be sure that the
+		 * previous table (ACL) has already pushed a vlan onto our packet,
+		 * and due to a HW limitation we need to set this bit, otherwise
+		 * push vlan + reformat will not work.
+		 */
+ if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
+ dr_ste_v0_set_go_back_bit(last_ste);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void
+dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+ dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->decap_actions,
+ attr->decap_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
+ dr_ste_v0_set_rx_decap(last_ste);
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (i ||
+ action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
+ action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_set_rx_pop_vlan(last_ste);
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_MODIFY_PKT,
+ attr->gvmi);
+ else
+ dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT);
+
+ dr_ste_v0_set_rewrite_actions(last_ste,
+ attr->modify_actions,
+ attr->modify_index);
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT)
+ dr_ste_v0_arr_init_next(&last_ste,
+ added_stes,
+ MLX5DR_STE_TYPE_RX,
+ attr->gvmi);
+
+ dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
+ }
+
+ dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void dr_ste_v0_set_action_set(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+static void dr_ste_v0_set_action_add(u8 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ length = (length == 32) ? 0 : length;
+ MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+static void dr_ste_v0_set_action_copy(u8 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
+ MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
+ MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
+}
+
+#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5
+
+static int
+dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
+ u8 *hw_action, u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
+ u32 hw_action_num;
+ int required_actions;
+ u32 hdr_fld_4b;
+ u16 hdr_fld_2b;
+ u16 vlan_type;
+ bool vlan;
+
+ vlan = (data_sz != HDR_LEN_L2);
+ hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
+ required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;
+
+ if (hw_action_num < required_actions)
+ return -ENOMEM;
+
+ /* dmac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 16);
+ hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_47_16 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
+ hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
+ MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* dmac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* ethertype + (optional) vlan */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 32);
+ if (!vlan) {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
+ } else {
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+ vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
+ hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+ MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
+ }
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ /* smac_15_0 */
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
+ MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+ hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+ if (vlan) {
+ MLX5_SET(dr_action_hw_set, hw_action,
+ opcode, DR_STE_ACTION_MDFY_OP_SET);
+ hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ inline_data, hdr_fld_2b);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_length, 16);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+ MLX5_SET(dr_action_hw_set, hw_action,
+ destination_left_shifter, 0);
+ }
+
+ *used_hw_action_num = required_actions;
+
+ return 0;
+}
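
A short usage note for the decap-L3 rewrite builder above: the number of required hardware SET actions depends only on whether the restored L2 header carries a VLAN (data_sz != HDR_LEN_L2). The sizing contract a caller has to satisfy is summarized below; the constants come from dr_ste.h above, while the caller-side buffer handling itself is an assumption.

/* Sizing contract of dr_ste_v0_set_action_decap_l3_list():
 *   untagged header (data_sz == HDR_LEN_L2, 0xE bytes)      -> 5 SET actions
 *   VLAN-tagged header (data_sz == HDR_LEN_L2_W_VLAN, 0x12) -> 6 SET actions
 * hw_action_sz must hold at least that many
 * MLX5_ST_SZ_BYTES(dr_action_hw_set) entries, otherwise -ENOMEM is returned.
 */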
+
+static void
+dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ if (mask->smac_47_16 || mask->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+ mask->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+ mask->smac_47_16 << 16 | mask->smac_15_0);
+ mask->smac_47_16 = 0;
+ mask->smac_15_0 = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ if (spec->smac_47_16 || spec->smac_15_0) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+ spec->smac_47_16 >> 16);
+ MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+ spec->smac_47_16 << 16 | spec->smac_15_0);
+ spec->smac_47_16 = 0;
+ spec->smac_15_0 = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
+}
+
+static int
+dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void
+dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int
+dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version) {
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
+}
+
+static int
+dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS(mpls, misc2, inner, tag);
+ else
+ DR_STE_SET_MPLS(mpls, misc2, outer, tag);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+ DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+ DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
+}
+
+static int
+dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
+ misc_2, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_exp,
+ misc_2, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_s_bos,
+ misc_2, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(flex_parser_0, tag, parser_3_ttl,
+ misc_2, outer_first_mpls_over_udp_ttl);
+ }
+ return 0;
+}
+
+static void
+dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
+}
+
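+/* Bit offsets of the ICMP type and code fields within the first flex
+ * parser dword.
+ */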
+#define ICMP_TYPE_OFFSET_FIRST_DW 24
+#define ICMP_CODE_OFFSET_FIRST_DW 16
+
+static int
+dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+ u32 *icmp_header_data;
+ int dw0_location;
+ int dw1_location;
+ u8 *icmp_type;
+ u8 *icmp_code;
+ bool is_ipv4;
+
+ is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
+ if (is_ipv4) {
+ icmp_header_data = &misc_3->icmpv4_header_data;
+ icmp_type = &misc_3->icmpv4_type;
+ icmp_code = &misc_3->icmpv4_code;
+ dw0_location = sb->caps->flex_parser_id_icmp_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmp_dw1;
+ } else {
+ icmp_header_data = &misc_3->icmpv6_header_data;
+ icmp_type = &misc_3->icmpv6_type;
+ icmp_code = &misc_3->icmpv6_code;
+ dw0_location = sb->caps->flex_parser_id_icmpv6_dw0;
+ dw1_location = sb->caps->flex_parser_id_icmpv6_dw1;
+ }
+
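+	/* Only one flex parser assignment is supported here: dword 0
+	 * (type/code) must sit in flex parser 4 and dword 1 (header data)
+	 * in flex parser 5, as expected by the ste_flex_parser_1 layout.
+	 */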
+ switch (dw0_location) {
+ case 4:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_4,
+ (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
+			 (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW));
+
+ *icmp_type = 0;
+ *icmp_code = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dw1_location) {
+ case 5:
+ MLX5_SET(ste_flex_parser_1, tag, flex_parser_5,
+ *icmp_header_data);
+ *icmp_header_data = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ int ret;
+
+ ret = dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
+ if (ret)
+ return ret;
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
+
+ return 0;
+}
+
+static int
+dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc_2, metadata_reg_a);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
+}
+
+static int
+dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+ sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
+}
+
+static int
+dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
+}
+
+static int
+dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
+}
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+ misc_mask->source_eswitch_owner_vhca_id = 0;
+}
+
+static int
+dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *dmn = sb->dmn;
+ struct mlx5dr_cmd_caps *caps;
+ u8 *bit_mask = sb->bit_mask;
+ bool source_gvmi_set;
+
+ DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+ if (sb->vhca_id_valid) {
+ /* Find port GVMI based on the eswitch_owner_vhca_id */
+ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ caps = &dmn->info.caps;
+ else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+ dmn->peer_dmn->info.caps.gvmi))
+ caps = &dmn->peer_dmn->info.caps;
+ else
+ return -EINVAL;
+
+ misc->source_eswitch_owner_vhca_id = 0;
+ } else {
+ caps = &dmn->info.caps;
+ }
+
+ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
+ if (source_gvmi_set) {
+ vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ if (!vport_cap) {
+ mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ misc->source_port);
+ return -EINVAL;
+ }
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+ }
+
+ return 0;
+}
+
+static void
+dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
+}
+
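+/* STEv0-specific callbacks used by the generic dr_ste layer */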
+struct mlx5dr_ste_ctx ste_ctx_v0 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v0_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v0_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v0_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v0_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v0_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v0_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v0_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v0_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v0_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v0_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v0_build_tnl_mpls_init,
+ .build_icmp_init = &dr_ste_v0_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v0_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v0_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
+ .build_register_0_init = &dr_ste_v0_build_register_0_init,
+ .build_register_1_init = &dr_ste_v0_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v0_build_src_gvmi_qpn_init,
+
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v0_init,
+ .set_next_lu_type = &dr_ste_v0_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v0_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v0_set_miss_addr,
+ .get_miss_addr = &dr_ste_v0_get_miss_addr,
+ .set_hit_addr = &dr_ste_v0_set_hit_addr,
+ .set_byte_mask = &dr_ste_v0_set_byte_mask,
+ .get_byte_mask = &dr_ste_v0_get_byte_mask,
+
+ /* Actions */
+ .set_actions_rx = &dr_ste_v0_set_actions_rx,
+ .set_actions_tx = &dr_ste_v0_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v0_action_modify_field_arr,
+ .set_action_set = &dr_ste_v0_set_action_set,
+ .set_action_add = &dr_ste_v0_set_action_add,
+ .set_action_copy = &dr_ste_v0_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v0_set_action_decap_l3_list,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
new file mode 100644
index 000000000000..4088d6e51508
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -0,0 +1,1633 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include "mlx5_ifc_dr_ste_v1.h"
+#include "dr_ste.h"
+
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+ ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+ DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+ DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+ DR_STE_V1_TYPE_BWC_DW = 0x1,
+ DR_STE_V1_TYPE_MATCH = 0x2,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+ DR_STE_V1_LU_TYPE_NOP = 0x0000,
+ DR_STE_V1_LU_TYPE_ETHL2_TNL = 0x0002,
+ DR_STE_V1_LU_TYPE_IBL3_EXT = 0x0102,
+ DR_STE_V1_LU_TYPE_ETHL2_O = 0x0003,
+ DR_STE_V1_LU_TYPE_IBL4 = 0x0103,
+ DR_STE_V1_LU_TYPE_ETHL2_I = 0x0004,
+ DR_STE_V1_LU_TYPE_SRC_QP_GVMI = 0x0104,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_O = 0x0005,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O = 0x0105,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_I = 0x0006,
+ DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I = 0x0106,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x0007,
+ DR_STE_V1_LU_TYPE_IPV6_DES_O = 0x0107,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x0008,
+ DR_STE_V1_LU_TYPE_IPV6_DES_I = 0x0108,
+ DR_STE_V1_LU_TYPE_ETHL4_O = 0x0009,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_O = 0x0109,
+ DR_STE_V1_LU_TYPE_ETHL4_I = 0x000a,
+ DR_STE_V1_LU_TYPE_IPV6_SRC_I = 0x010a,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O = 0x000b,
+ DR_STE_V1_LU_TYPE_MPLS_O = 0x010b,
+ DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I = 0x000c,
+ DR_STE_V1_LU_TYPE_MPLS_I = 0x010c,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O = 0x000d,
+ DR_STE_V1_LU_TYPE_GRE = 0x010d,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x000e,
+ DR_STE_V1_LU_TYPE_GENERAL_PURPOSE = 0x010e,
+ DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I = 0x000f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0 = 0x010f,
+ DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1 = 0x0110,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_0 = 0x0111,
+ DR_STE_V1_LU_TYPE_FLEX_PARSER_1 = 0x0112,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_O = 0x0113,
+ DR_STE_V1_LU_TYPE_ETHL4_MISC_I = 0x0114,
+ DR_STE_V1_LU_TYPE_INVALID = 0x00ff,
+ DR_STE_V1_LU_TYPE_DONT_CARE = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
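+/* Packet header anchors used as start/end references by the insert and
+ * remove header actions.
+ */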
+enum dr_ste_v1_header_anchors {
+ DR_STE_HEADER_ANCHOR_START_OUTER = 0x00,
+ DR_STE_HEADER_ANCHOR_1ST_VLAN = 0x02,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4 = 0x07,
+ DR_STE_HEADER_ANCHOR_INNER_MAC = 0x13,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4 = 0x19,
+};
+
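+/* Size in bytes of a single/double/triple action slot in the STE action area */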
+enum dr_ste_v1_action_size {
+ DR_STE_ACTION_SINGLE_SZ = 4,
+ DR_STE_ACTION_DOUBLE_SZ = 8,
+ DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0, /* Regular push header (e.g. push vlan) */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2, /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+ DR_STE_V1_ACTION_ID_NOP = 0x00,
+ DR_STE_V1_ACTION_ID_COPY = 0x05,
+ DR_STE_V1_ACTION_ID_SET = 0x06,
+ DR_STE_V1_ACTION_ID_ADD = 0x07,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE = 0x08,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER = 0x09,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE = 0x0a,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER = 0x0b,
+ DR_STE_V1_ACTION_ID_FLOW_TAG = 0x0c,
+ DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
+ DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
+ DR_STE_V1_ACTION_ID_TRAILER = 0x13,
+ DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
+ DR_STE_V1_ACTION_ID_MAX = 0x21,
+ /* use for special cases */
+ DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3 = 0x22,
+};
+
+enum {
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0 = 0x00,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1 = 0x01,
+ DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2 = 0x02,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0 = 0x08,
+ DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1 = 0x09,
+ DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0 = 0x0e,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0 = 0x18,
+ DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1 = 0x19,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0 = 0x40,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1 = 0x41,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0 = 0x44,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1 = 0x45,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2 = 0x46,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3 = 0x47,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0 = 0x4c,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1 = 0x4d,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2 = 0x4e,
+ DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
+ DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
+ DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3 = 0x8d,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4 = 0x8e,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5 = 0x8f,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6 = 0x90,
+ DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7 = 0x91,
+};
+
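+/* Mapping from mlx5 modify-header fields to the STEv1 hardware field IDs
+ * and the bit range each field occupies.
+ */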
+static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+ .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+ .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_6, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_7, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_4, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_5, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_3, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+ },
+};
+
+static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
+}
+
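+/* The miss address is 64-byte aligned; its index is split across the
+ * miss_address_31_6 (low 26 bits) and miss_address_39_32 (next 8 bits) fields.
+ */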
+static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+ u64 index = miss_addr >> 6;
+
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
+}
+
+static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
+{
+ u64 index =
+ (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
+ MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26);
+
+ return index << 6;
+}
+
+static void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
+}
+
+static u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
+{
+ return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
+}
+
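+/* The 2-byte lookup type is split into the definer mode (high byte) and
+ * the definer index (low byte), as described above.
+ */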
+static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
+}
+
+static void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
+}
+
+static u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
+{
+ u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
+ u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
+
+ return (mode << 8 | index);
+}
+
+static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
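+/* The hit address is 32-byte aligned; its index is combined with the hash
+ * table size in the low bits and split across the two next_table_base fields.
+ */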
+static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+ u64 index = (icm_addr >> 5) | ht_size;
+
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
+}
+
+static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type,
+ u8 entry_type, u16 gvmi)
+{
+ dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
+ dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
+}
+
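+/* Before posting a full STE, swap the tag and mask halves in place; a
+ * control-only write (DR_STE_SIZE_CTRL) is sent as-is.
+ */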
+static void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p,
+ u32 ste_size)
+{
+ u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
+ u8 *mask = tag + DR_STE_SIZE_TAG;
+ u8 tmp_tag[DR_STE_SIZE_TAG] = {};
+
+ if (ste_size == DR_STE_SIZE_CTRL)
+ return;
+
+ WARN_ON(ste_size != DR_STE_SIZE);
+
+ /* Backup tag */
+ memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
+
+ /* Swap mask and tag both are the same size */
+ memcpy(tag, mask, DR_STE_SIZE_MASK);
+ memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
+}
+
+static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
+{
+ MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_FLOW_TAG);
+ MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
+}
+
+static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
+}
+
+static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+{
+ MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
+}
+
+static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
+ u32 vlan_hdr)
+{
+ MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
+	/* The hardware expects the offset to the vlan header in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+ start_offset, HDR_LEN_L2_MACS >> 1);
+ MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+ inline_data, vlan_hdr);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+{
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+ action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+ start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+ remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_tx_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
+{
+ /* Remove L2 headers */
+ MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+ /* Encapsulate with given reformat ID */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_POINTER);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rx_decap_l3(u8 *hw_ste_p,
+ u8 *s_action,
+ u16 decap_actions,
+ u32 decap_index)
+{
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
+ decap_actions);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
+ decap_index);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
+ u8 *s_action,
+ u16 num_of_actions,
+ u32 re_write_index)
+{
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
+ DR_STE_V1_ACTION_ID_MODIFY_LIST);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
+ num_of_actions);
+ MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
+ re_write_index);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
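+/* Chain a new MATCH-type STE with a don't-care lookup and a cleared action
+ * area, so more actions can be written once the current STE is full.
+ */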
+static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
+ u32 *added_stes,
+ u16 gvmi)
+{
+ u8 *action;
+
+ (*added_stes)++;
+ *last_ste += DR_STE_SIZE;
+ dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
+ dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);
+
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
+ memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
+}
+
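+/* Build the TX action sequence into the STE action area. action_sz tracks
+ * the remaining room in the current STE; when an action does not fit, or
+ * ordering does not allow it, a new match STE is chained with
+ * dr_ste_v1_arr_init_next_match() and writing continues there.
+ */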
+static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
+ u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
+ bool allow_encap = true;
+
+ if (action_type_set[DR_ACTION_TYP_CTR])
+ dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
+ attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1,
+ last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_rewrite_actions(last_ste, action,
+ attr->modify_actions,
+ attr->modify_index);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_encap = false;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+ int i;
+
+ for (i = 0; i < attr->vlans.count; i++) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_encap = true;
+ }
+ dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+ }
+
+ if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
+ if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_encap = true;
+ }
+ dr_ste_v1_set_tx_encap(last_ste, action,
+ attr->reformat_id,
+ attr->reformat_size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
+ u8 *d_action;
+
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ d_action = action + DR_STE_ACTION_SINGLE_SZ;
+
+ dr_ste_v1_set_tx_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat_id,
+ attr->reformat_size);
+ action_sz -= DR_STE_ACTION_TRIPLE_SZ;
+ action += DR_STE_ACTION_TRIPLE_SZ;
+ }
+
+ dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes)
+{
+ u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
+ u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
+ bool allow_modify_hdr = true;
+ bool allow_ctr = true;
+
+ if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+ dr_ste_v1_set_rx_decap_l3(last_ste, action,
+ attr->decap_actions,
+ attr->decap_index);
+ dr_ste_v1_set_rewrite_actions(last_ste, action,
+ attr->decap_actions,
+ attr->decap_index);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_modify_hdr = false;
+ allow_ctr = false;
+ } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
+ dr_ste_v1_set_rx_decap(last_ste, action);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
+ allow_modify_hdr = false;
+ allow_ctr = false;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_TAG]) {
+ if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = true;
+ allow_ctr = true;
+ }
+ dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+ if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
+ !allow_modify_hdr) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = false;
+ allow_ctr = false;
+ }
+
+ dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count);
+ action_sz -= DR_STE_ACTION_SINGLE_SZ;
+ action += DR_STE_ACTION_SINGLE_SZ;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+ /* Modify header and decapsulation must use different STEs */
+ if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = true;
+ allow_ctr = true;
+ }
+ dr_ste_v1_set_rewrite_actions(last_ste, action,
+ attr->modify_actions,
+ attr->modify_index);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ }
+
+ if (action_type_set[DR_ACTION_TYP_CTR]) {
+		/* The counter action is set after decap to exclude the decapped header */
+ if (!allow_ctr) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = true;
+ allow_ctr = false;
+ }
+ dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+ }
+
+ dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
+ dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+static void dr_ste_v1_set_action_set(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
+ MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
+ MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
+ MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
+ MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
+}
+
+static void dr_ste_v1_set_action_add(u8 *d_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data)
+{
+ shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
+ MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
+ MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
+ MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
+ MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
+}
+
+static void dr_ste_v1_set_action_copy(u8 *d_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter)
+{
+ dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+ MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
+ MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
+ MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
+ MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
+ MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
+ MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
+}
+
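+/* L3 decap rewrite: the outer headers are removed up to the inner IP, the
+ * new L2 header (at most DR_STE_L2_HDR_MAX_SZ bytes, padded) is inserted
+ * inline in chunks, and the padding is trimmed, using at most
+ * DR_STE_DECAP_L3_ACTION_NUM double actions.
+ */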
+#define DR_STE_DECAP_L3_ACTION_NUM 8
+#define DR_STE_L2_HDR_MAX_SZ 20
+
+static int dr_ste_v1_set_action_decap_l3_list(void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num)
+{
+ u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+ void *data_ptr = padded_data;
+ u16 used_actions = 0;
+ u32 inline_data_sz;
+ u32 i;
+
+ if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+ return -EINVAL;
+
+ memcpy(padded_data, data, data_sz);
+
+	/* Remove the outer L2/L3 headers */
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
+ MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
+ DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ used_actions++; /* Remove and NOP are a single double action */
+
+ inline_data_sz =
+ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+ /* Add the new header inline + 2 extra bytes */
+ for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+ void *addr_inline;
+
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_INSERT_INLINE);
+		/* The hardware expects the offset here in words (2 bytes) */
+ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
+ i * 2);
+
+		/* Copy the bytes directly to avoid an endianness problem */
+ addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
+ hw_action, inline_data);
+ memcpy(addr_inline, data_ptr, inline_data_sz);
+ hw_action += DR_STE_ACTION_DOUBLE_SZ;
+ data_ptr += inline_data_sz;
+ used_actions++;
+ }
+
+ /* Remove 2 extra bytes */
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
+ DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
+	/* The hardware expects the size here in words (2 bytes) */
+ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
+ used_actions++;
+
+ *used_hw_action_num = used_actions;
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ } else if (mask->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->svlan_tag = 0;
+ }
+}
+
+static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);
+
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else if (spec->ip_version) {
+ return -EINVAL;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+ DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
+	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
+	DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_mask->inner_second_cvlan_tag ||
+ misc_mask->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->inner_second_cvlan_tag = 0;
+ misc_mask->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_vlan_id, misc_mask, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_cfi, misc_mask, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_priority, misc_mask, inner_second_prio);
+ } else {
+ if (misc_mask->outer_second_cvlan_tag ||
+ misc_mask->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
+ misc_mask->outer_second_cvlan_tag = 0;
+ misc_mask->outer_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_vlan_id, misc_mask, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_cfi, misc_mask, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+ second_priority, misc_mask, outer_second_prio);
+ }
+}
+
+static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+ bool inner, u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);
+
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else if (spec->ip_version) {
+ return -EINVAL;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (inner) {
+ if (misc_spec->inner_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->inner_second_cvlan_tag = 0;
+ } else if (misc_spec->inner_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->inner_second_svlan_tag = 0;
+ }
+
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
+ } else {
+ if (misc_spec->outer_second_cvlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
+ misc_spec->outer_second_cvlan_tag = 0;
+ } else if (misc_spec->outer_second_svlan_tag) {
+ MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
+ misc_spec->outer_second_svlan_tag = 0;
+ }
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);
+
+ dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
+ DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);
+
+ return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
+}
+
+static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+ dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
+
+ return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+static void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
+}
+
+static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
+{
+ struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
+ DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
+ l2_tunneling_network_id, (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (mask->svlan_tag || mask->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
+ mask->cvlan_tag = 0;
+ mask->svlan_tag = 0;
+ }
+}
+
+static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
+ DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);
+
+ if (misc->vxlan_vni) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
+ (misc->vxlan_vni << 8));
+ misc->vxlan_vni = 0;
+ }
+
+ if (spec->cvlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+ spec->cvlan_tag = 0;
+ } else if (spec->svlan_tag) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+ spec->svlan_tag = 0;
+ }
+
+ if (spec->ip_version == IP_VERSION_IPV4) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
+ spec->ip_version = 0;
+ } else if (spec->ip_version == IP_VERSION_IPV6) {
+ MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
+ spec->ip_version = 0;
+ } else if (spec->ip_version) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
+}
+
+static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+ DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
+}
+
+static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
+ DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
+ DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
+ DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
+ DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
+ DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+ if (sb->inner)
+ DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
+ else
+ DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);
+
+ if (spec->tcp_flags) {
+ DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
+ spec->tcp_flags = 0;
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
+}
+
+static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (sb->inner)
+ DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
+ else
+ DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
+}
+
+static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
+ DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
+ DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
+ DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);
+
+ DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
+ DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
+}
+
+static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
+ misc2, outer_first_mpls_over_gre_label);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
+ misc2, outer_first_mpls_over_gre_exp);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
+ misc2, outer_first_mpls_over_gre_s_bos);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
+ misc2, outer_first_mpls_over_gre_ttl);
+ } else {
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
+ misc2, outer_first_mpls_over_udp_label);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
+ misc2, outer_first_mpls_over_udp_exp);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
+ misc2, outer_first_mpls_over_udp_s_bos);
+
+ DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
+ misc2, outer_first_mpls_over_udp_ttl);
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
+}
+
+static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+ bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
+ u32 *icmp_header_data;
+ u8 *icmp_type;
+ u8 *icmp_code;
+
+ if (is_ipv4) {
+ icmp_header_data = &misc3->icmpv4_header_data;
+ icmp_type = &misc3->icmpv4_type;
+ icmp_code = &misc3->icmpv4_code;
+ } else {
+ icmp_header_data = &misc3->icmpv6_header_data;
+ icmp_type = &misc3->icmpv6_type;
+ icmp_code = &misc3->icmpv6_code;
+ }
+
+ MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
+ MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
+ MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
+
+ *icmp_header_data = 0;
+ *icmp_type = 0;
+ *icmp_code = 0;
+
+ return 0;
+}
+
+static int dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
+
+ return 0;
+}
+
+static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+ misc2, metadata_reg_a);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
+}
+
+static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ if (sb->inner) {
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
+ } else {
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
+ DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
+ }
+
+ return 0;
+}
+
+static void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
+}
+
+static int
+dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_flags, misc3,
+ outer_vxlan_gpe_flags);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_next_protocol, misc3,
+ outer_vxlan_gpe_next_protocol);
+ DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+ outer_vxlan_gpe_vni, misc3,
+ outer_vxlan_gpe_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_protocol_type, misc, geneve_protocol_type);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_oam, misc, geneve_oam);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_opt_len, misc, geneve_opt_len);
+ DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+ geneve_vni, misc, geneve_vni);
+
+ return 0;
+}
+
+static void
+dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
+}
+
+static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+ DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+ DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+ DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
+}
+
+static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+ DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+ DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+ DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+ DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+ return 0;
+}
+
+static void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
+}
+
+static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
+{
+ struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+ DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
+ DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
+}
+
+static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+ struct mlx5dr_ste_build *sb,
+ u8 *tag)
+{
+ struct mlx5dr_match_misc *misc = &value->misc;
+ struct mlx5dr_cmd_vport_cap *vport_cap;
+ struct mlx5dr_domain *dmn = sb->dmn;
+ struct mlx5dr_cmd_caps *caps;
+ u8 *bit_mask = sb->bit_mask;
+
+ DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
+
+ if (sb->vhca_id_valid) {
+ /* Find port GVMI based on the eswitch_owner_vhca_id */
+ if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ caps = &dmn->info.caps;
+ else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
+ dmn->peer_dmn->info.caps.gvmi))
+ caps = &dmn->peer_dmn->info.caps;
+ else
+ return -EINVAL;
+
+ misc->source_eswitch_owner_vhca_id = 0;
+ } else {
+ caps = &dmn->info.caps;
+ }
+
+ if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
+ return 0;
+
+ vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ if (!vport_cap) {
+ mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
+ misc->source_port);
+ return -EINVAL;
+ }
+
+ if (vport_cap->vport_gvmi)
+ MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);
+
+ misc->source_port = 0;
+ return 0;
+}
+
+static void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask)
+{
+ dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+ sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
+ sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+ sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
+}
+
+struct mlx5dr_ste_ctx ste_ctx_v1 = {
+ /* Builders */
+ .build_eth_l2_src_dst_init = &dr_ste_v1_build_eth_l2_src_dst_init,
+ .build_eth_l3_ipv6_src_init = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+ .build_eth_l3_ipv6_dst_init = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+ .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+ .build_eth_l2_src_init = &dr_ste_v1_build_eth_l2_src_init,
+ .build_eth_l2_dst_init = &dr_ste_v1_build_eth_l2_dst_init,
+ .build_eth_l2_tnl_init = &dr_ste_v1_build_eth_l2_tnl_init,
+ .build_eth_l3_ipv4_misc_init = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+ .build_eth_ipv6_l3_l4_init = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+ .build_mpls_init = &dr_ste_v1_build_mpls_init,
+ .build_tnl_gre_init = &dr_ste_v1_build_tnl_gre_init,
+ .build_tnl_mpls_init = &dr_ste_v1_build_tnl_mpls_init,
+ .build_icmp_init = &dr_ste_v1_build_icmp_init,
+ .build_general_purpose_init = &dr_ste_v1_build_general_purpose_init,
+ .build_eth_l4_misc_init = &dr_ste_v1_build_eth_l4_misc_init,
+ .build_tnl_vxlan_gpe_init = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+ .build_tnl_geneve_init = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+ .build_register_0_init = &dr_ste_v1_build_register_0_init,
+ .build_register_1_init = &dr_ste_v1_build_register_1_init,
+ .build_src_gvmi_qpn_init = &dr_ste_v1_build_src_gvmi_qpn_init,
+ /* Getters and Setters */
+ .ste_init = &dr_ste_v1_init,
+ .set_next_lu_type = &dr_ste_v1_set_next_lu_type,
+ .get_next_lu_type = &dr_ste_v1_get_next_lu_type,
+ .set_miss_addr = &dr_ste_v1_set_miss_addr,
+ .get_miss_addr = &dr_ste_v1_get_miss_addr,
+ .set_hit_addr = &dr_ste_v1_set_hit_addr,
+ .set_byte_mask = &dr_ste_v1_set_byte_mask,
+ .get_byte_mask = &dr_ste_v1_get_byte_mask,
+ /* Actions */
+ .set_actions_rx = &dr_ste_v1_set_actions_rx,
+ .set_actions_tx = &dr_ste_v1_set_actions_tx,
+ .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
+ .modify_field_arr = dr_ste_v1_action_modify_field_arr,
+ .set_action_set = &dr_ste_v1_set_action_set,
+ .set_action_add = &dr_ste_v1_set_action_add,
+ .set_action_copy = &dr_ste_v1_set_action_copy,
+ .set_action_decap_l3_list = &dr_ste_v1_set_action_decap_l3_list,
+ /* Send */
+ .prepare_for_postsend = &dr_ste_v1_prepare_for_postsend,
+};
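
The ste_ctx_v1 table above plugs the STEv1 builders, getters/setters and action helpers into the generic mlx5dr_ste_ctx interface; callers reach it through mlx5dr_ste_get_ctx(), declared in dr_types.h below. As a rough, hedged sketch of that dispatch (the real hunk lives in dr_ste.c and is not shown here; ste_ctx_v0 and the MLX5_STEERING_FORMAT_* values come from the surrounding series):

    /* Sketch only: version-indexed lookup of the STE context.
     * ste_ctx_v0 is the legacy (ConnectX-5 era) context from earlier
     * in the series; ste_ctx_v1 is the table defined above.
     */
    static struct mlx5dr_ste_ctx *mlx5dr_ste_ctx_arr[] = {
            [MLX5_STEERING_FORMAT_CONNECTX_5]   = &ste_ctx_v0,
            [MLX5_STEERING_FORMAT_CONNECTX_6DX] = &ste_ctx_v1,
    };

    struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
    {
            if (version > MLX5_STEERING_FORMAT_CONNECTX_6DX)
                    return NULL;

            return mlx5dr_ste_ctx_arr[version];
    }
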
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 51880df26724..4af0e4e6a13c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -120,6 +120,7 @@ struct mlx5dr_ste_htbl;
struct mlx5dr_match_param;
struct mlx5dr_cmd_caps;
struct mlx5dr_matcher_rx_tx;
+struct mlx5dr_ste_ctx;
struct mlx5dr_ste {
u8 *hw_ste;
@@ -154,7 +155,7 @@ struct mlx5dr_ste_htbl_ctrl {
};
struct mlx5dr_ste_htbl {
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u32 refcount;
struct mlx5dr_icm_chunk *chunk;
@@ -190,7 +191,7 @@ struct mlx5dr_ste_build {
u8 vhca_id_valid:1;
struct mlx5dr_domain *dmn;
struct mlx5dr_cmd_caps *caps;
- u8 lu_type;
+ u16 lu_type;
u16 byte_mask;
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
@@ -201,7 +202,7 @@ struct mlx5dr_ste_build {
struct mlx5dr_ste_htbl *
mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size,
- u8 lu_type, u16 byte_mask);
+ u16 lu_type, u16 byte_mask);
int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
@@ -219,35 +220,84 @@ static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
/* STE utils */
u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
-void mlx5dr_ste_init(u8 *hw_ste_p, u8 lu_type, u8 entry_type, u16 gvmi);
-void mlx5dr_ste_always_hit_htbl(struct mlx5dr_ste *ste,
- struct mlx5dr_ste_htbl *next_htbl);
-void mlx5dr_ste_set_miss_addr(u8 *hw_ste, u64 miss_addr);
-u64 mlx5dr_ste_get_miss_addr(u8 *hw_ste);
-void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
-void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
-void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 miss_addr);
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste, u64 icm_addr, u32 ht_size);
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste,
+ struct mlx5dr_ste_htbl *next_htbl);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
-void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
-void mlx5dr_ste_set_counter_id(u8 *hw_ste_p, u32 ctr_id);
-void mlx5dr_ste_set_tx_encap(void *hw_ste_p, u32 reformat_id,
- int size, bool encap_l3);
-void mlx5dr_ste_set_rx_decap(u8 *hw_ste_p);
-void mlx5dr_ste_set_rx_decap_l3(u8 *hw_ste_p, bool vlan);
-void mlx5dr_ste_set_rx_pop_vlan(u8 *hw_ste_p);
-void mlx5dr_ste_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_tpid_pcp_dei_vid,
- bool go_back);
-void mlx5dr_ste_set_entry_type(u8 *hw_ste_p, u8 entry_type);
-u8 mlx5dr_ste_get_entry_type(u8 *hw_ste_p);
-void mlx5dr_ste_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
- u32 re_write_index);
-void mlx5dr_ste_set_go_back_bit(u8 *hw_ste_p);
u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
+#define MLX5DR_MAX_VLANS 2
+
+struct mlx5dr_ste_actions_attr {
+ u32 modify_index;
+ u16 modify_actions;
+ u32 decap_index;
+ u16 decap_actions;
+ u8 decap_with_vlan:1;
+ u64 final_icm_addr;
+ u32 flow_tag;
+ u32 ctr_id;
+ u16 gvmi;
+ u16 hit_gvmi;
+ u32 reformat_id;
+ u32 reformat_size;
+ struct {
+ int count;
+ u32 headers[MLX5DR_MAX_VLANS];
+ } vlans;
+};
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_domain *dmn,
+ u8 *action_type_set,
+ u8 *last_ste,
+ struct mlx5dr_ste_actions_attr *attr,
+ u32 *added_stes);
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 hw_field,
+ u8 shifter,
+ u8 length,
+ u32 data);
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+ __be64 *hw_action,
+ u8 dst_hw_field,
+ u8 dst_shifter,
+ u8 dst_len,
+ u8 src_hw_field,
+ u8 src_shifter);
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+ void *data,
+ u32 data_sz,
+ u8 *hw_action,
+ u32 hw_action_sz,
+ u16 *used_hw_action_num);
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
void mlx5dr_ste_free(struct mlx5dr_ste *ste,
struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher);
@@ -271,8 +321,6 @@ static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
return !ste->refcount;
}
-void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
- struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
@@ -289,65 +337,85 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_build *builder,
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_icmp(struct mlx5dr_ste_build *sb,
+int mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_cmd_caps *caps,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+ struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
@@ -574,10 +642,10 @@ struct mlx5dr_match_misc3 {
u32 outer_vxlan_gpe_next_protocol:8;
u32 icmpv4_header_data;
u32 icmpv6_header_data;
- u32 icmpv6_code:8;
- u32 icmpv6_type:8;
- u32 icmpv4_code:8;
- u32 icmpv4_type:8;
+ u8 icmpv6_code;
+ u8 icmpv6_type;
+ u8 icmpv4_code;
+ u8 icmpv4_type;
u8 reserved_auto3[0x1c];
};
@@ -598,7 +666,8 @@ struct mlx5dr_esw_caps {
u64 drop_icm_address_tx;
u64 uplink_icm_address_rx;
u64 uplink_icm_address_tx;
- bool sw_owner;
+ u8 sw_owner:1;
+ u8 sw_owner_v2:1;
};
struct mlx5dr_cmd_vport_cap {
@@ -631,6 +700,9 @@ struct mlx5dr_cmd_caps {
bool rx_sw_owner;
bool tx_sw_owner;
bool fdb_sw_owner;
+ u8 rx_sw_owner_v2:1;
+ u8 tx_sw_owner_v2:1;
+ u8 fdb_sw_owner_v2:1;
u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
struct mlx5dr_cmd_vport_cap *vports_caps;
@@ -671,6 +743,7 @@ struct mlx5dr_domain {
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
struct mlx5dr_domain_cache cache;
+ struct mlx5dr_ste_ctx *ste_ctx;
};
struct mlx5dr_table_rx_tx {
@@ -725,6 +798,14 @@ struct mlx5dr_rule_member {
struct list_head use_ste_list;
};
+struct mlx5dr_ste_action_modify_field {
+ u16 hw_field;
+ u8 start;
+ u8 end;
+ u8 l3_type;
+ u8 l4_type;
+};
+
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
@@ -995,12 +1076,16 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
+
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+ u8 *hw_ste_p, u32 ste_size);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
struct mlx5dr_htbl_connect_info *connect_info,
bool update_hw_ste);
-void mlx5dr_ste_set_formatted_ste(u16 gvmi,
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+ u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
u8 *formatted_ste,
@@ -1126,6 +1211,8 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
u32 group_id,
struct mlx5dr_cmd_fte_info *fte);
+bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps);
+
struct mlx5dr_fw_recalc_cs_ft {
u64 rx_icm_addr;
u32 table_id;
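
The dr_types.h hunk above makes the STE format pluggable: lu_type grows from u8 to u16, every mlx5dr_ste_build_*() prototype gains a leading struct mlx5dr_ste_ctx * argument, and struct mlx5dr_domain now carries the chosen context in dmn->ste_ctx. A minimal caller-side sketch of the new calling convention (example_build_outer_l2 is a hypothetical helper; the real call sites are in dr_matcher.c, outside this excerpt):

    /* Hedged sketch: invoking a builder with the new ste_ctx argument. */
    static void example_build_outer_l2(struct mlx5dr_domain *dmn,
                                       struct mlx5dr_ste_build *sb,
                                       struct mlx5dr_match_param *mask)
    {
            bool inner = false, rx = true;

            mlx5dr_ste_build_eth_l2_src_dst(dmn->ste_ctx, sb, mask, inner, rx);
    }
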
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
index e01c3766c7de..83df6df6b459 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
@@ -5,91 +5,6 @@
#define MLX5_IFC_DR_H
enum {
- MLX5DR_ACTION_MDFY_HW_FLD_L2_0 = 0,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_1 = 1,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_2 = 2,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_0 = 3,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_1 = 4,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_2 = 5,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_3 = 6,
- MLX5DR_ACTION_MDFY_HW_FLD_L3_4 = 7,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_0 = 8,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_1 = 9,
- MLX5DR_ACTION_MDFY_HW_FLD_MPLS = 10,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_0 = 11,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_0 = 12,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_1 = 13,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_2 = 14,
- MLX5DR_ACTION_MDFY_HW_FLD_REG_3 = 15,
- MLX5DR_ACTION_MDFY_HW_FLD_L4_2 = 16,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_0 = 17,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_1 = 18,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_2 = 19,
- MLX5DR_ACTION_MDFY_HW_FLD_FLEX_3 = 20,
- MLX5DR_ACTION_MDFY_HW_FLD_L2_TNL_1 = 21,
- MLX5DR_ACTION_MDFY_HW_FLD_METADATA = 22,
- MLX5DR_ACTION_MDFY_HW_FLD_RESERVED = 23,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_OP_COPY = 0x1,
- MLX5DR_ACTION_MDFY_HW_OP_SET = 0x2,
- MLX5DR_ACTION_MDFY_HW_OP_ADD = 0x3,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L3_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV4 = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L3_IPV6 = 0x2,
-};
-
-enum {
- MLX5DR_ACTION_MDFY_HW_HDR_L4_NONE = 0x0,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_TCP = 0x1,
- MLX5DR_ACTION_MDFY_HW_HDR_L4_UDP = 0x2,
-};
-
-enum {
- MLX5DR_STE_LU_TYPE_NOP = 0x00,
- MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP = 0x05,
- MLX5DR_STE_LU_TYPE_ETHL2_TUNNELING_I = 0x0a,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_O = 0x06,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_I = 0x07,
- MLX5DR_STE_LU_TYPE_ETHL2_DST_D = 0x1b,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_O = 0x08,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_I = 0x09,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_D = 0x1c,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_O = 0x36,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_I = 0x37,
- MLX5DR_STE_LU_TYPE_ETHL2_SRC_DST_D = 0x38,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_O = 0x0d,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_I = 0x0e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_DST_D = 0x1e,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_O = 0x0f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_I = 0x10,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV6_SRC_D = 0x1f,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_O = 0x11,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_I = 0x12,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_5_TUPLE_D = 0x20,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_O = 0x29,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_I = 0x2a,
- MLX5DR_STE_LU_TYPE_ETHL3_IPV4_MISC_D = 0x2b,
- MLX5DR_STE_LU_TYPE_ETHL4_O = 0x13,
- MLX5DR_STE_LU_TYPE_ETHL4_I = 0x14,
- MLX5DR_STE_LU_TYPE_ETHL4_D = 0x21,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_O = 0x2c,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_I = 0x2d,
- MLX5DR_STE_LU_TYPE_ETHL4_MISC_D = 0x2e,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_O = 0x15,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_I = 0x24,
- MLX5DR_STE_LU_TYPE_MPLS_FIRST_D = 0x25,
- MLX5DR_STE_LU_TYPE_GRE = 0x16,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_0 = 0x22,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_1 = 0x23,
- MLX5DR_STE_LU_TYPE_FLEX_PARSER_TNL_HEADER = 0x19,
- MLX5DR_STE_LU_TYPE_GENERAL_PURPOSE = 0x18,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_0 = 0x2f,
- MLX5DR_STE_LU_TYPE_STEERING_REGISTERS_1 = 0x30,
MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
new file mode 100644
index 000000000000..34c2bd17a8b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
@@ -0,0 +1,434 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef MLX5_IFC_DR_STE_V1_H
+#define MLX5_IFC_DR_STE_V1_H
+
+enum mlx5_ifc_ste_v1_modify_hdr_offset {
+ MLX5_MODIFY_HEADER_V1_QW_OFFSET = 0x20,
+};
+
+struct mlx5_ifc_ste_single_action_flow_tag_v1_bits {
+ u8 action_id[0x8];
+ u8 flow_tag[0x18];
+};
+
+struct mlx5_ifc_ste_single_action_modify_list_v1_bits {
+ u8 action_id[0x8];
+ u8 num_of_modify_actions[0x8];
+ u8 modify_actions_ptr[0x10];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 reserved_at_10[0x2];
+ u8 end_anchor[0x6];
+ u8 reserved_at_18[0x4];
+ u8 decap[0x1];
+ u8 vni_to_cqe[0x1];
+ u8 qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 outer_l4_remove[0x1];
+ u8 reserved_at_11[0x1];
+ u8 start_offset[0x7];
+ u8 reserved_at_18[0x1];
+ u8 remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_copy_v1_bits {
+ u8 action_id[0x8];
+ u8 destination_dw_offset[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_17[0x2];
+ u8 destination_length[0x6];
+
+ u8 reserved_at_20[0x8];
+ u8 source_dw_offset[0x8];
+ u8 reserved_at_30[0x2];
+ u8 source_right_shifter[0x6];
+ u8 reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_ste_double_action_set_v1_bits {
+ u8 action_id[0x8];
+ u8 destination_dw_offset[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x2];
+ u8 destination_length[0x6];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_add_v1_bits {
+ u8 action_id[0x8];
+ u8 destination_dw_offset[0x8];
+ u8 reserved_at_10[0x2];
+ u8 destination_left_shifter[0x6];
+ u8 reserved_at_18[0x2];
+ u8 destination_length[0x6];
+
+ u8 add_value[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 start_offset[0x7];
+ u8 reserved_at_17[0x9];
+
+ u8 inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits {
+ u8 action_id[0x8];
+ u8 reserved_at_8[0x2];
+ u8 start_anchor[0x6];
+ u8 start_offset[0x7];
+ u8 size[0x6];
+ u8 attributes[0x3];
+
+ u8 pointer[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_modify_action_list_v1_bits {
+ u8 action_id[0x8];
+ u8 modify_actions_pattern_pointer[0x18];
+
+ u8 number_of_modify_actions[0x8];
+ u8 modify_actions_argument_pointer[0x18];
+};
+
+struct mlx5_ifc_ste_match_bwc_v1_bits {
+ u8 entry_format[0x8];
+ u8 counter_id[0x18];
+
+ u8 miss_address_63_48[0x10];
+ u8 match_definer_ctx_idx[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 reserved_at_5a[0x1];
+ u8 match_polarity[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_5d[0x3];
+
+ u8 next_table_base_63_48[0x10];
+ u8 hash_definer_ctx_idx[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 hash_type[0x2];
+ u8 hash_after_actions[0x1];
+ u8 reserved_at_9e[0x2];
+
+ u8 byte_mask[0x10];
+ u8 next_entry_format[0x1];
+ u8 mask_mode[0x1];
+ u8 gvmi[0xe];
+
+ u8 action[0x40];
+};
+
+struct mlx5_ifc_ste_mask_and_match_v1_bits {
+ u8 entry_format[0x8];
+ u8 counter_id[0x18];
+
+ u8 miss_address_63_48[0x10];
+ u8 match_definer_ctx_idx[0x8];
+ u8 miss_address_39_32[0x8];
+
+ u8 miss_address_31_6[0x1a];
+ u8 reserved_at_5a[0x1];
+ u8 match_polarity[0x1];
+ u8 reparse[0x1];
+ u8 reserved_at_5d[0x3];
+
+ u8 next_table_base_63_48[0x10];
+ u8 hash_definer_ctx_idx[0x8];
+ u8 next_table_base_39_32_size[0x8];
+
+ u8 next_table_base_31_5_size[0x1b];
+ u8 hash_type[0x2];
+ u8 hash_after_actions[0x1];
+ u8 reserved_at_9e[0x2];
+
+ u8 action[0x60];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_v1_bits {
+ u8 reserved_at_0[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_loopback[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encapsulation_type[0x2];
+ u8 port[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+
+ u8 smac_47_16[0x20];
+
+ u8 smac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 reserved_at_60[0x6];
+ u8 tcp_syn[0x1];
+ u8 reserved_at_67[0x3];
+ u8 force_loopback[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_dst_v1_bits {
+ u8 reserved_at_0[0x1];
+ u8 sx_sniffer[0x1];
+ u8 functional_lb[0x1];
+ u8 ip_fragmented[0x1];
+ u8 qp_type[0x2];
+ u8 encapsulation_type[0x2];
+ u8 port[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 reserved_at_60[0x6];
+ u8 tcp_syn[0x1];
+ u8 reserved_at_67[0x3];
+ u8 force_lb[0x1];
+ u8 l2_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 l4_ok[0x1];
+ u8 second_vlan_qualifier[0x2];
+ u8 second_priority[0x3];
+ u8 second_cfi[0x1];
+ u8 second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_dst_v1_bits {
+ u8 dmac_47_16[0x20];
+
+ u8 smac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 reserved_at_50[0x2];
+ u8 functional_lb[0x1];
+ u8 reserved_at_53[0x5];
+ u8 port[0x2];
+ u8 l3_type[0x2];
+ u8 reserved_at_5c[0x2];
+ u8 first_vlan_qualifier[0x2];
+
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+ u8 smac_15_0[0x10];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_v1_bits {
+ u8 source_address[0x20];
+
+ u8 destination_address[0x20];
+
+ u8 source_port[0x10];
+ u8 destination_port[0x10];
+
+ u8 reserved_at_60[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 fragmented[0x1];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ u8 protocol[0x8];
+};
+
+struct mlx5_ifc_ste_eth_l2_tnl_v1_bits {
+ u8 l2_tunneling_network_id[0x20];
+
+ u8 dmac_47_16[0x20];
+
+ u8 dmac_15_0[0x10];
+ u8 l3_ethertype[0x10];
+
+ u8 reserved_at_60[0x3];
+ u8 ip_fragmented[0x1];
+ u8 reserved_at_64[0x2];
+ u8 encp_type[0x2];
+ u8 reserved_at_68[0x2];
+ u8 l3_type[0x2];
+ u8 l4_type[0x2];
+ u8 first_vlan_qualifier[0x2];
+ u8 first_priority[0x3];
+ u8 first_cfi[0x1];
+ u8 first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_misc_v1_bits {
+ u8 identification[0x10];
+ u8 flags[0x3];
+ u8 fragment_offset[0xd];
+
+ u8 total_length[0x10];
+ u8 checksum[0x10];
+
+ u8 version[0x4];
+ u8 ihl[0x4];
+ u8 time_to_live[0x8];
+ u8 reserved_at_50[0x10];
+
+ u8 reserved_at_60[0x1c];
+ u8 voq_internal_prio[0x4];
+};
+
+struct mlx5_ifc_ste_eth_l4_v1_bits {
+ u8 ipv6_version[0x4];
+ u8 reserved_at_4[0x4];
+ u8 dscp[0x6];
+ u8 ecn[0x2];
+ u8 ipv6_hop_limit[0x8];
+ u8 protocol[0x8];
+
+ u8 src_port[0x10];
+ u8 dst_port[0x10];
+
+ u8 first_fragment[0x1];
+ u8 reserved_at_41[0xb];
+ u8 flow_label[0x14];
+
+ u8 tcp_data_offset[0x4];
+ u8 l4_ok[0x1];
+ u8 l3_ok[0x1];
+ u8 fragmented[0x1];
+ u8 tcp_ns[0x1];
+ u8 tcp_cwr[0x1];
+ u8 tcp_ece[0x1];
+ u8 tcp_urg[0x1];
+ u8 tcp_ack[0x1];
+ u8 tcp_psh[0x1];
+ u8 tcp_rst[0x1];
+ u8 tcp_syn[0x1];
+ u8 tcp_fin[0x1];
+ u8 ipv6_paylen[0x10];
+};
+
+struct mlx5_ifc_ste_eth_l4_misc_v1_bits {
+ u8 window_size[0x10];
+ u8 urgent_pointer[0x10];
+
+ u8 ack_num[0x20];
+
+ u8 seq_num[0x20];
+
+ u8 length[0x10];
+ u8 checksum[0x10];
+};
+
+struct mlx5_ifc_ste_mpls_v1_bits {
+ u8 reserved_at_0[0x15];
+ u8 mpls_ok[0x1];
+ u8 mpls4_s_bit[0x1];
+ u8 mpls4_qualifier[0x1];
+ u8 mpls3_s_bit[0x1];
+ u8 mpls3_qualifier[0x1];
+ u8 mpls2_s_bit[0x1];
+ u8 mpls2_qualifier[0x1];
+ u8 mpls1_s_bit[0x1];
+ u8 mpls1_qualifier[0x1];
+ u8 mpls0_s_bit[0x1];
+ u8 mpls0_qualifier[0x1];
+
+ u8 mpls0_label[0x14];
+ u8 mpls0_exp[0x3];
+ u8 mpls0_s_bos[0x1];
+ u8 mpls0_ttl[0x8];
+
+ u8 mpls1_label[0x20];
+
+ u8 mpls2_label[0x20];
+};
+
+struct mlx5_ifc_ste_gre_v1_bits {
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 strict_src_route[0x1];
+ u8 recur[0x3];
+ u8 flags[0x5];
+ u8 version[0x3];
+ u8 gre_protocol[0x10];
+
+ u8 reserved_at_20[0x20];
+
+ u8 gre_key_h[0x18];
+ u8 gre_key_l[0x8];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_src_gvmi_qp_v1_bits {
+ u8 loopback_synd[0x8];
+ u8 reserved_at_8[0x7];
+ u8 functional_lb[0x1];
+ u8 source_gvmi[0x10];
+
+ u8 force_lb[0x1];
+ u8 reserved_at_21[0x1];
+ u8 source_is_requestor[0x1];
+ u8 reserved_at_23[0x5];
+ u8 source_qp[0x18];
+
+ u8 reserved_at_40[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_icmp_v1_bits {
+ u8 icmp_payload_data[0x20];
+
+ u8 icmp_header_data[0x20];
+
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 reserved_at_50[0x10];
+
+ u8 reserved_at_60[0x20];
+};
+
+#endif /* MLX5_IFC_DR_STE_V1_H */
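
The *_bits layouts in this new header are not touched directly; like the rest of mlx5, they are read and written through MLX5_SET()/MLX5_GET(), which is exactly what the DR_STE_SET_TAG and MLX5_SET calls in dr_ste_v1.c above do. A small hedged illustration (the 32-byte tag size and the standalone helper are assumptions for the sketch; field names come from mlx5_ifc_ste_eth_l2_src_v1_bits):

    /* Sketch: writing/reading STEv1 eth_l2_src fields via the ifc accessors. */
    static u16 example_fill_vlan_tag(void)
    {
            u8 tag[32] = {};        /* assumed tag area size, for illustration only */

            MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
            MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_id, 100);

            return MLX5_GET(ste_eth_l2_src_v1, tag, first_vlan_id);
    }
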
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 4177786b8eaf..612b0ac31db2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -124,7 +124,10 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
+ return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+ (MLX5_CAP_GEN(dev, steering_format_version) <=
+ MLX5_STEERING_FORMAT_CONNECTX_6DX));
}
/* buddy functions & structure */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bdafc85fd874..e05c5c0f3ae1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -36,6 +36,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
+#include "sf/sf.h"
/* Mutex to hold while enabling or disabling RoCE */
static DEFINE_MUTEX(mlx5_roce_en_lock);
@@ -1160,6 +1161,18 @@ EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
*/
u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev)
{
- return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev);
+ return MLX5_SPECIAL_VPORTS(dev) + mlx5_core_max_vfs(dev) + mlx5_sf_max_functions(dev);
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports);
+
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out)
+{
+ u16 opmod = (MLX5_CAP_GENERAL << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
+ u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
+
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+ MLX5_SET(query_hca_cap_in, in, function_id, function_id);
+ MLX5_SET(query_hca_cap_in, in, other_function, true);
+ return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
+}
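
mlx5_vport_get_other_func_cap() wraps QUERY_HCA_CAP with other_function set, so callers supply an out buffer sized for query_hca_cap_out and parse it with the usual accessors. A hedged sketch of such a caller (the helper name and the specific capability read are illustrative; the real users live in the new SF code, not in this hunk):

    /* Sketch: query another function's HCA caps and read one field. */
    static int example_other_func_roce(struct mlx5_core_dev *dev,
                                       u16 function_id, bool *roce)
    {
            void *query_ctx, *hca_caps;
            int err;

            query_ctx = kzalloc(MLX5_ST_SZ_BYTES(query_hca_cap_out), GFP_KERNEL);
            if (!query_ctx)
                    return -ENOMEM;

            err = mlx5_vport_get_other_func_cap(dev, function_id, query_ctx);
            if (err)
                    goto out;

            hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
            *roce = MLX5_GET(cmd_hca_cap, hca_caps, roce);
    out:
            kfree(query_ctx);
            return err;
    }
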
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
index 5d9ddf36fb4e..e6f677e42007 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
@@ -267,7 +267,7 @@ struct mlxfw_mfa2_file *mlxfw_mfa2_file_init(const struct firmware *fw)
const void *first_tlv_ptr;
const void *cb_top_ptr;
- mfa2_file = kcalloc(1, sizeof(*mfa2_file), GFP_KERNEL);
+ mfa2_file = kzalloc(sizeof(*mfa2_file), GFP_KERNEL);
if (!mfa2_file)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 685037e052af..52fdc34251ba 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -84,6 +84,7 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
+ atomic_t active_ports_count;
bool fw_flash_in_progress;
struct {
struct devlink_health_reporter *fw_fatal;
@@ -96,8 +97,36 @@ struct mlxsw_core {
#define MLXSW_PORT_MAX_PORTS_DEFAULT 0x40
-static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
+static u64 mlxsw_ports_occ_get(void *priv)
{
+ struct mlxsw_core *mlxsw_core = priv;
+
+ return atomic_read(&mlxsw_core->active_ports_count);
+}
+
+static int mlxsw_core_resources_ports_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params ports_num_params;
+ u32 max_ports;
+
+ max_ports = mlxsw_core->max_ports - 1;
+ devlink_resource_size_params_init(&ports_num_params, max_ports,
+ max_ports, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink,
+ DEVLINK_RESOURCE_GENERIC_NAME_PORTS,
+ max_ports, MLXSW_CORE_RESOURCE_PORTS,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &ports_num_params);
+}
+
+static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core, bool reload)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ int err;
+
/* Switch ports are numbered from 1 to queried value */
if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
@@ -110,11 +139,30 @@ static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
if (!mlxsw_core->ports)
return -ENOMEM;
+ if (!reload) {
+ err = mlxsw_core_resources_ports_register(mlxsw_core);
+ if (err)
+ goto err_resources_ports_register;
+ }
+ atomic_set(&mlxsw_core->active_ports_count, 0);
+ devlink_resource_occ_get_register(devlink, MLXSW_CORE_RESOURCE_PORTS,
+ mlxsw_ports_occ_get, mlxsw_core);
+
return 0;
+
+err_resources_ports_register:
+ kfree(mlxsw_core->ports);
+ return err;
}
-static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
+static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core, bool reload)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+
+ devlink_resource_occ_get_unregister(devlink, MLXSW_CORE_RESOURCE_PORTS);
+ if (!reload)
+ devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
+
kfree(mlxsw_core->ports);
}
@@ -1897,7 +1945,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_register_resources;
}
- err = mlxsw_ports_init(mlxsw_core);
+ err = mlxsw_ports_init(mlxsw_core, reload);
if (err)
goto err_ports_init;
@@ -1986,7 +2034,7 @@ err_devlink_register:
err_emad_init:
kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
- mlxsw_ports_fini(mlxsw_core);
+ mlxsw_ports_fini(mlxsw_core, reload);
err_ports_init:
if (!reload)
devlink_resources_unregister(devlink, NULL);
@@ -2056,7 +2104,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
- mlxsw_ports_fini(mlxsw_core);
+ mlxsw_ports_fini(mlxsw_core, reload);
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -2755,16 +2803,25 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
const unsigned char *switch_id,
unsigned char switch_id_len)
{
- return __mlxsw_core_port_init(mlxsw_core, local_port,
- DEVLINK_PORT_FLAVOUR_PHYSICAL,
- port_number, split, split_port_subnumber,
- splittable, lanes,
- switch_id, switch_id_len);
+ int err;
+
+ err = __mlxsw_core_port_init(mlxsw_core, local_port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber,
+ splittable, lanes,
+ switch_id, switch_id_len);
+ if (err)
+ return err;
+
+ atomic_inc(&mlxsw_core->active_ports_count);
+ return 0;
}
EXPORT_SYMBOL(mlxsw_core_port_init);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
+ atomic_dec(&mlxsw_core->active_ports_count);
+
__mlxsw_core_port_fini(mlxsw_core, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 6b3ccbf6b238..8af7d9d03475 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -19,6 +19,11 @@
#include "cmd.h"
#include "resources.h"
+enum mlxsw_core_resource_id {
+ MLXSW_CORE_RESOURCE_PORTS = 1,
+ MLXSW_CORE_RESOURCE_MAX,
+};
+
struct mlxsw_core;
struct mlxsw_core_port;
struct mlxsw_driver;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 4eeae8d78006..d0052537e627 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -323,8 +323,8 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
struct pci_dev *pdev = mlxsw_pci->pdev;
dma_addr_t mapaddr;
- mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
- if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
+ mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
@@ -342,7 +342,7 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
if (!frag_len)
return;
- pci_unmap_single(pdev, mapaddr, frag_len, direction);
+ dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
@@ -858,9 +858,9 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
tasklet_setup(&q->tasklet, q_ops->tasklet);
mem_item->size = MLXSW_PCI_AQ_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size, &mem_item->mapaddr,
+ GFP_KERNEL);
if (!mem_item->buf)
return -ENOMEM;
@@ -890,8 +890,8 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err_q_ops_init:
kfree(q->elem_info);
err_elem_info_alloc:
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
return err;
}
@@ -903,8 +903,8 @@ static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
q_ops->fini(mlxsw_pci, q);
kfree(q->elem_info);
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
@@ -1273,9 +1273,9 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
mem_item = &mlxsw_pci->fw_area.items[i];
mem_item->size = MLXSW_PCI_PAGE_SIZE;
- mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
- mem_item->size,
- &mem_item->mapaddr);
+ mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
+ mem_item->size,
+ &mem_item->mapaddr, GFP_KERNEL);
if (!mem_item->buf) {
err = -ENOMEM;
goto err_alloc;
@@ -1304,8 +1304,8 @@ err_alloc:
for (i--; i >= 0; i--) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
return err;
@@ -1321,8 +1321,8 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
mem_item = &mlxsw_pci->fw_area.items[i];
- pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
- mem_item->buf, mem_item->mapaddr);
+ dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
+ mem_item->buf, mem_item->mapaddr);
}
kfree(mlxsw_pci->fw_area.items);
}
@@ -1347,8 +1347,8 @@ static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
int err = 0;
mbox->size = MLXSW_CMD_MBOX_SIZE;
- mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
- &mbox->mapaddr);
+ mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
+ &mbox->mapaddr, GFP_KERNEL);
if (!mbox->buf) {
dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
err = -ENOMEM;
@@ -1362,8 +1362,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
{
struct pci_dev *pdev = mlxsw_pci->pdev;
- pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
- mbox->mapaddr);
+ dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
+ mbox->mapaddr);
}
static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
@@ -1848,17 +1848,11 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
- goto err_pci_set_dma_mask;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_pci_set_dma_mask;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index a6956cfc9cb1..d9d9e1f488f9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -52,7 +52,7 @@
#define MLXSW_SP_RESOURCE_NAME_COUNTERS_RIF "rif"
enum mlxsw_sp_resource_id {
- MLXSW_SP_RESOURCE_KVD = 1,
+ MLXSW_SP_RESOURCE_KVD = MLXSW_CORE_RESOURCE_MAX,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
@@ -328,15 +328,16 @@ struct mlxsw_sp_port_type_speed_ops {
u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
void (*from_ptys_link)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- u8 width, unsigned long *mode);
+ unsigned long *mode);
u32 (*from_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto);
- void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
- bool carrier_ok, u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd);
+ void (*from_ptys_link_mode)(struct mlxsw_sp *mlxsw_sp,
+ bool carrier_ok, u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd);
int (*ptys_max_speed)(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed);
- u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
+ u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp,
+ const struct ethtool_link_ksettings *cmd);
+ u32 (*to_ptys_speed_lanes)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
- u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
void (*reg_ptys_eth_pack)(struct mlxsw_sp *mlxsw_sp, char *payload,
u8 local_port, u32 proto_admin, bool autoneg);
void (*reg_ptys_eth_unpack)(struct mlxsw_sp *mlxsw_sp, char *payload,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 540616469e28..bd7f873f6290 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -858,7 +858,7 @@ static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
static void
mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
- u8 width, struct ethtool_link_ksettings *cmd)
+ struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -869,13 +869,13 @@ mlxsw_sp_port_get_link_supported(struct mlxsw_sp *mlxsw_sp, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
ops->from_ptys_supported_port(mlxsw_sp, eth_proto_cap, cmd);
- ops->from_ptys_link(mlxsw_sp, eth_proto_cap, width,
+ ops->from_ptys_link(mlxsw_sp, eth_proto_cap,
cmd->link_modes.supported);
}
static void
mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
- u32 eth_proto_admin, bool autoneg, u8 width,
+ u32 eth_proto_admin, bool autoneg,
struct ethtool_link_ksettings *cmd)
{
const struct mlxsw_sp_port_type_speed_ops *ops;
@@ -886,7 +886,7 @@ mlxsw_sp_port_get_link_advertise(struct mlxsw_sp *mlxsw_sp,
return;
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
- ops->from_ptys_link(mlxsw_sp, eth_proto_admin, width,
+ ops->from_ptys_link(mlxsw_sp, eth_proto_admin,
cmd->link_modes.advertising);
}
@@ -960,16 +960,14 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
ops = mlxsw_sp->port_type_speed_ops;
autoneg = mlxsw_sp_port->link.autoneg;
- mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
- mlxsw_sp_port->mapping.width, cmd);
+ mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap, cmd);
- mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg,
- mlxsw_sp_port->mapping.width, cmd);
+ mlxsw_sp_port_get_link_advertise(mlxsw_sp, eth_proto_admin, autoneg, cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
- ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
- eth_proto_oper, cmd);
+ ops->from_ptys_link_mode(mlxsw_sp, netif_carrier_ok(dev),
+ eth_proto_oper, cmd);
return 0;
}
@@ -997,14 +995,13 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
eth_proto_new = autoneg ?
- ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width,
- cmd) :
- ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width,
- cmd->base.speed);
+ ops->to_ptys_advert_link(mlxsw_sp, cmd) :
+ ops->to_ptys_speed_lanes(mlxsw_sp, mlxsw_sp_port->mapping.width,
+ cmd);
eth_proto_new = eth_proto_new & eth_proto_cap;
if (!eth_proto_new) {
- netdev_err(dev, "No supported speed requested\n");
+ netdev_err(dev, "No supported speed or lanes requested\n");
return -EINVAL;
}
@@ -1063,20 +1060,21 @@ mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
}
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
- .get_drvinfo = mlxsw_sp_port_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_link_ext_state = mlxsw_sp_port_get_link_ext_state,
- .get_pauseparam = mlxsw_sp_port_get_pauseparam,
- .set_pauseparam = mlxsw_sp_port_set_pauseparam,
- .get_strings = mlxsw_sp_port_get_strings,
- .set_phys_id = mlxsw_sp_port_set_phys_id,
- .get_ethtool_stats = mlxsw_sp_port_get_stats,
- .get_sset_count = mlxsw_sp_port_get_sset_count,
- .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
- .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
- .get_module_info = mlxsw_sp_get_module_info,
- .get_module_eeprom = mlxsw_sp_get_module_eeprom,
- .get_ts_info = mlxsw_sp_get_ts_info,
+ .cap_link_lanes_supported = true,
+ .get_drvinfo = mlxsw_sp_port_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ext_state = mlxsw_sp_port_get_link_ext_state,
+ .get_pauseparam = mlxsw_sp_port_get_pauseparam,
+ .set_pauseparam = mlxsw_sp_port_set_pauseparam,
+ .get_strings = mlxsw_sp_port_get_strings,
+ .set_phys_id = mlxsw_sp_port_set_phys_id,
+ .get_ethtool_stats = mlxsw_sp_port_get_stats,
+ .get_sset_count = mlxsw_sp_port_get_sset_count,
+ .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
+ .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
+ .get_module_info = mlxsw_sp_get_module_info,
+ .get_module_eeprom = mlxsw_sp_get_module_eeprom,
+ .get_ts_info = mlxsw_sp_get_ts_info,
};
struct mlxsw_sp1_port_link_mode {
@@ -1198,7 +1196,7 @@ mlxsw_sp1_from_ptys_supported_port(struct mlxsw_sp *mlxsw_sp,
static void
mlxsw_sp1_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- u8 width, unsigned long *mode)
+ unsigned long *mode)
{
int i;
@@ -1223,19 +1221,21 @@ mlxsw_sp1_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
}
static void
-mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
- u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd)
+mlxsw_sp1_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
+ int i;
+
+ cmd->link_mode = -1;
if (!carrier_ok)
return;
- cmd->base.speed = mlxsw_sp1_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
- if (cmd->base.speed != SPEED_UNKNOWN)
- cmd->base.duplex = DUPLEX_FULL;
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp1_port_link_mode[i].mask)
+ cmd->link_mode = mlxsw_sp1_port_link_mode[i].mask_ethtool;
+ }
}
static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
@@ -1260,7 +1260,7 @@ static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_
}
static u32
-mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
+mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
@@ -1274,14 +1274,17 @@ mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
return ptys_proto;
}
-static u32 mlxsw_sp1_to_ptys_speed(struct mlxsw_sp *mlxsw_sp, u8 width,
- u32 speed)
+static u32 mlxsw_sp1_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
+ const struct ethtool_link_ksettings *cmd)
{
u32 ptys_proto = 0;
int i;
+ if (cmd->lanes > width)
+ return ptys_proto;
+
for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
- if (speed == mlxsw_sp1_port_link_mode[i].speed)
+ if (cmd->base.speed == mlxsw_sp1_port_link_mode[i].speed)
ptys_proto |= mlxsw_sp1_port_link_mode[i].mask;
}
return ptys_proto;
@@ -1321,10 +1324,10 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp1_from_ptys_link,
.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
- .from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
+ .from_ptys_link_mode = mlxsw_sp1_from_ptys_link_mode,
.ptys_max_speed = mlxsw_sp1_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
- .to_ptys_speed = mlxsw_sp1_to_ptys_speed,
+ .to_ptys_speed_lanes = mlxsw_sp1_to_ptys_speed_lanes,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
.ptys_proto_cap_masked_get = mlxsw_sp1_ptys_proto_cap_masked_get,
@@ -1486,7 +1489,8 @@ struct mlxsw_sp2_port_link_mode {
int m_ethtool_len;
u32 mask;
u32 speed;
- u8 mask_width;
+ u32 width;
+ u8 mask_sup_width;
};
static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
@@ -1494,105 +1498,117 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M,
.mask_ethtool = mlxsw_sp2_mask_ethtool_sgmii_100m,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_SGMII_100M_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII,
.mask_ethtool = mlxsw_sp2_mask_ethtool_1000base_x_sgmii,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_1000BASE_X_SGMII_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_1000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_5000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xfi_xaui_1_10g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XFI_XAUI_1_10G_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_10000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G,
.mask_ethtool = mlxsw_sp2_mask_ethtool_xlaui_4_xlppi_4_40g,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_XLAUI_4_XLPPI_4_40G_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_40000,
+ .width = 4,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_25GAUI_1_25GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_25gaui_1_25gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_25GAUI_1_25GBASE_CR_KR_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_25000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_2_LAUI_2_50GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_2_laui_2_50gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_2_LAUI_2_50GBASE_CR2_KR2_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X |
MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_50000,
+ .width = 2,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR,
.mask_ethtool = mlxsw_sp2_mask_ethtool_50gaui_1_laui_1_50gbase_cr_kr,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_50GAUI_1_LAUI_1_50GBASE_CR_KR_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
.speed = SPEED_50000,
+ .width = 1,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_caui_4_100gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_CAUI_4_100GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_100000,
+ .width = 4,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2,
.mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_2_100GBASE_CR2_KR2_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
.speed = SPEED_100000,
+ .width = 2,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X |
MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_200000,
+ .width = 4,
},
{
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_8X,
.speed = SPEED_400000,
+ .width = 8,
},
};
@@ -1619,14 +1635,12 @@ mlxsw_sp2_set_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
static void
mlxsw_sp2_from_ptys_link(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto,
- u8 width, unsigned long *mode)
+ unsigned long *mode)
{
- u8 mask_width = mlxsw_sp_port_mask_width_get(width);
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if ((ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) &&
- (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask)
mlxsw_sp2_set_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
mode);
}
@@ -1646,19 +1660,24 @@ mlxsw_sp2_from_ptys_speed(struct mlxsw_sp *mlxsw_sp, u32 ptys_eth_proto)
}
static void
-mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
- u32 ptys_eth_proto,
- struct ethtool_link_ksettings *cmd)
+mlxsw_sp2_from_ptys_link_mode(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
+ u32 ptys_eth_proto,
+ struct ethtool_link_ksettings *cmd)
{
- cmd->base.speed = SPEED_UNKNOWN;
- cmd->base.duplex = DUPLEX_UNKNOWN;
+ struct mlxsw_sp2_port_link_mode link;
+ int i;
+
+ cmd->link_mode = -1;
if (!carrier_ok)
return;
- cmd->base.speed = mlxsw_sp2_from_ptys_speed(mlxsw_sp, ptys_eth_proto);
- if (cmd->base.speed != SPEED_UNKNOWN)
- cmd->base.duplex = DUPLEX_FULL;
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (ptys_eth_proto & mlxsw_sp2_port_link_mode[i].mask) {
+ link = mlxsw_sp2_port_link_mode[i];
+ cmd->link_mode = link.mask_ethtool[1];
+ }
+ }
}
static int mlxsw_sp2_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
@@ -1698,33 +1717,50 @@ mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
}
static u32
-mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
+mlxsw_sp2_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp,
const struct ethtool_link_ksettings *cmd)
{
- u8 mask_width = mlxsw_sp_port_mask_width_get(width);
u32 ptys_proto = 0;
int i;
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if ((mask_width & mlxsw_sp2_port_link_mode[i].mask_width) &&
- mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
+ if (mlxsw_sp2_test_bit_ethtool(&mlxsw_sp2_port_link_mode[i],
cmd->link_modes.advertising))
ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
}
return ptys_proto;
}
-static u32 mlxsw_sp2_to_ptys_speed(struct mlxsw_sp *mlxsw_sp,
- u8 width, u32 speed)
+static u32 mlxsw_sp2_to_ptys_speed_lanes(struct mlxsw_sp *mlxsw_sp, u8 width,
+ const struct ethtool_link_ksettings *cmd)
{
u8 mask_width = mlxsw_sp_port_mask_width_get(width);
+ struct mlxsw_sp2_port_link_mode link_mode;
u32 ptys_proto = 0;
int i;
+ if (cmd->lanes > width)
+ return ptys_proto;
+
for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
- if ((speed == mlxsw_sp2_port_link_mode[i].speed) &&
- (mask_width & mlxsw_sp2_port_link_mode[i].mask_width))
- ptys_proto |= mlxsw_sp2_port_link_mode[i].mask;
+ if (cmd->base.speed == mlxsw_sp2_port_link_mode[i].speed) {
+ link_mode = mlxsw_sp2_port_link_mode[i];
+
+ if (!cmd->lanes) {
+ /* If number of lanes was not set by user space,
+ * choose the link mode that supports the width
+ * of the port.
+ */
+ if (mask_width & link_mode.mask_sup_width)
+ ptys_proto |= link_mode.mask;
+ } else if (cmd->lanes == link_mode.width) {
+ /* Otherwise, if the number of lanes was set, choose
+ * the link mode whose actual width equals it.
+ */
+ ptys_proto |= link_mode.mask;
+ }
+ }
}
return ptys_proto;
}
@@ -1764,10 +1800,10 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp2_from_ptys_link,
.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
- .from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
+ .from_ptys_link_mode = mlxsw_sp2_from_ptys_link_mode,
.ptys_max_speed = mlxsw_sp2_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
- .to_ptys_speed = mlxsw_sp2_to_ptys_speed,
+ .to_ptys_speed_lanes = mlxsw_sp2_to_ptys_speed_lanes,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
.ptys_proto_cap_masked_get = mlxsw_sp2_ptys_proto_cap_masked_get,
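
Editor's note: the new to_ptys_speed_lanes() callbacks select link modes by both speed and lane count. A simplified, stand-alone sketch of that selection logic is shown below; the mode table and helper name are made up for illustration and the width check is reduced to a plain comparison rather than the driver's supported-width bitmask.

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/kernel.h>

/* Hypothetical link-mode table entry: hardware mask, speed in Mb/s and
 * the number of lanes the mode actually uses.
 */
struct example_link_mode {
	u32 mask;
	u32 speed;
	u32 width;
};

static const struct example_link_mode example_modes[] = {
	{ .mask = BIT(0), .speed = 50000,  .width = 1 },
	{ .mask = BIT(1), .speed = 50000,  .width = 2 },
	{ .mask = BIT(2), .speed = 100000, .width = 2 },
	{ .mask = BIT(3), .speed = 100000, .width = 4 },
};

/* Return the mask of modes matching the requested speed. If the user
 * asked for a specific lane count, only modes of exactly that width
 * qualify; otherwise any mode that fits within the port width does.
 */
static u32 example_speed_lanes_to_mask(u32 speed, u32 lanes, u8 port_width)
{
	u32 mask = 0;
	int i;

	if (lanes > port_width)
		return 0;

	for (i = 0; i < ARRAY_SIZE(example_modes); i++) {
		if (example_modes[i].speed != speed)
			continue;
		if (!lanes && example_modes[i].width <= port_width)
			mask |= example_modes[i].mask;
		else if (lanes == example_modes[i].width)
			mask |= example_modes[i].mask;
	}
	return mask;
}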
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 41424ee909a0..9ce90841f92d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4309,11 +4309,18 @@ static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
if (event != NEXTHOP_EVENT_REPLACE)
return 0;
- if (!info->is_grp)
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
info->extack);
- return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp, info->nh_grp,
- info->extack);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
+ info->nh_grp,
+ info->extack);
+ default:
+ NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
+ return -EOPNOTSUPP;
+ }
}
static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
@@ -4321,13 +4328,17 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
{
const struct net_device *dev;
- if (info->is_grp)
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ dev = info->nh->dev;
+ return info->nh->gw_family || info->nh->is_reject ||
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+ case NH_NOTIFIER_INFO_TYPE_GRP:
/* Already validated earlier. */
return true;
-
- dev = info->nh->dev;
- return info->nh->gw_family || info->nh->is_reject ||
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
+ default:
+ return false;
+ }
}
static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
@@ -4410,11 +4421,22 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct nh_notifier_info *info)
{
- unsigned int nhs = info->is_grp ? info->nh_grp->num_nh : 1;
struct mlxsw_sp_nexthop_group_info *nhgi;
struct mlxsw_sp_nexthop *nh;
+ unsigned int nhs;
int err, i;
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
+ nhs = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nhs = info->nh_grp->num_nh;
+ break;
+ default:
+ return -EINVAL;
+ }
+
nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
if (!nhgi)
return -ENOMEM;
@@ -4427,12 +4449,18 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
int weight;
nh = &nhgi->nexthops[i];
- if (info->is_grp) {
- nh_obj = &info->nh_grp->nh_entries[i].nh;
- weight = info->nh_grp->nh_entries[i].weight;
- } else {
+ switch (info->type) {
+ case NH_NOTIFIER_INFO_TYPE_SINGLE:
nh_obj = info->nh;
weight = 1;
+ break;
+ case NH_NOTIFIER_INFO_TYPE_GRP:
+ nh_obj = &info->nh_grp->nh_entries[i].nh;
+ weight = info->nh_grp->nh_entries[i].weight;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_nexthop_obj_init;
}
err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
weight);
@@ -4915,6 +4943,25 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
}
static void
+mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib_entry_notifier_info *fen_info)
+{
+ u32 *p_dst = (u32 *) &fen_info->dst;
+ struct fib_rt_info fri;
+
+ fri.fi = fen_info->fi;
+ fri.tb_id = fen_info->tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = fen_info->dst_len;
+ fri.tos = fen_info->tos;
+ fri.type = fen_info->type;
+ fri.offload = false;
+ fri.trap = false;
+ fri.offload_failed = true;
+ fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
+}
+
+static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
@@ -4935,6 +4982,7 @@ mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fri.type = fib4_entry->type;
fri.offload = should_offload;
fri.trap = !should_offload;
+ fri.offload_failed = false;
fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
@@ -4957,9 +5005,35 @@ mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fri.type = fib4_entry->type;
fri.offload = false;
fri.trap = false;
+ fri.offload_failed = false;
fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void
+mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+ int i;
+
+ /* In IPv6 a multipath route is represented using multiple routes, so
+ * we need to set the flags on all of them.
+ */
+ for (i = 0; i < nrt6; i++)
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
+ false, false, true);
+}
+#else
+static void
+mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
+ struct fib6_info **rt_arr,
+ unsigned int nrt6)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
@@ -4976,10 +5050,18 @@ mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
- fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
- !should_offload);
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
+ should_offload, !should_offload, false);
+}
+#else
+static void
+mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
}
+#endif
+#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
@@ -4990,8 +5072,16 @@ mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
common);
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
- fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
+ fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
+ false, false, false);
+}
+#else
+static void
+mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
}
+#endif
static void
mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
@@ -6976,6 +7066,8 @@ static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
+ &fib_event->fen_info);
}
fib_info_put(fib_event->fen_info.fi);
break;
@@ -6997,6 +7089,7 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_event *fib_event)
{
+ struct mlxsw_sp_fib6_event *fib6_event = &fib_event->fib6_event;
int err;
mlxsw_sp_span_respin(mlxsw_sp);
@@ -7008,6 +7101,9 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
+ fib6_event->rt_arr,
+ fib6_event->nrt6);
}
mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
@@ -7017,6 +7113,9 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
mlxsw_sp_router_fib_abort(mlxsw_sp);
+ mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
+ fib6_event->rt_arr,
+ fib6_event->nrt6);
}
mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
break;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index cea42f6ed89b..23b7e8d6386b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -527,7 +527,6 @@ mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u8 state)
{
@@ -535,9 +534,6 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_vlan *bridge_vlan;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -657,56 +653,59 @@ err_port_bridge_vlan_learning_set:
return err;
}
-static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
- *mlxsw_sp_port,
- struct switchdev_trans *trans,
- unsigned long brport_flags)
+static int
+mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_brport_flags flags)
{
- if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
return 0;
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_UC,
- brport_flags & BR_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_UC,
+ flags.val & BR_FLOOD);
+ if (err)
+ return err;
+ }
- err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
- brport_flags & BR_LEARNING);
- if (err)
- return err;
+ if (flags.mask & BR_LEARNING) {
+ err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
+ bridge_port,
+ flags.val & BR_LEARNING);
+ if (err)
+ return err;
+ }
if (bridge_port->bridge_device->multicast_enabled)
goto out;
- err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
- MLXSW_SP_FLOOD_TYPE_MC,
- brport_flags &
- BR_MCAST_FLOOD);
- if (err)
- return err;
+ if (flags.mask & BR_MCAST_FLOOD) {
+ err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
+ bridge_port,
+ MLXSW_SP_FLOOD_TYPE_MC,
+ flags.val & BR_MCAST_FLOOD);
+ if (err)
+ return err;
+ }
out:
- memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
+ memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
return 0;
}
@@ -724,35 +723,26 @@ static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
}
static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
unsigned long ageing_clock_t)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
- if (switchdev_trans_ph_prepare(trans)) {
- if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
- ageing_time > MLXSW_SP_MAX_AGEING_TIME)
- return -ERANGE;
- else
- return 0;
- }
+ if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
+ ageing_time > MLXSW_SP_MAX_AGEING_TIME)
+ return -ERANGE;
return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
}
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool vlan_enabled)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -765,16 +755,12 @@ static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
u16 vlan_proto)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (!switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_device))
return -EINVAL;
@@ -784,16 +770,12 @@ static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_p
}
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_port_mrouter)
{
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
orig_dev);
if (!bridge_port)
@@ -825,7 +807,6 @@ static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
}
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool mc_disabled)
{
@@ -834,9 +815,6 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_bridge_port *bridge_port;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -896,16 +874,12 @@ mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
bool is_mrouter)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
/* It's possible we failed to enslave the port, yet this
* operation is executed due to it being deferred.
*/
@@ -922,53 +896,52 @@ mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
int err;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
- trans,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
- err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
- err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.vlan_protocol);
break;
case SWITCHDEV_ATTR_ID_PORT_MROUTER:
- err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
- err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mc_disabled);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
- err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
attr->orig_dev,
attr->u.mrouter);
break;
@@ -977,8 +950,7 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
break;
}
- if (switchdev_trans_ph_commit(trans))
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
return err;
}
@@ -1211,23 +1183,20 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
const struct switchdev_obj_port_vlan *vlan)
{
u16 pvid;
- u16 vid;
pvid = mlxsw_sp_rif_vid(mlxsw_sp, br_dev);
if (!pvid)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
- if (vid != pvid) {
- netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
- return -EBUSY;
- }
- } else {
- if (vid == pvid) {
- netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
- return -EBUSY;
- }
+ if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
+ if (vlan->vid != pvid) {
+ netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
+ return -EBUSY;
+ }
+ } else {
+ if (vlan->vid == pvid) {
+ netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
+ return -EBUSY;
}
}
@@ -1236,7 +1205,6 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
@@ -1244,14 +1212,12 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev)) {
int err = 0;
if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
- br_vlan_enabled(orig_dev) &&
- switchdev_trans_ph_prepare(trans))
+ br_vlan_enabled(orig_dev))
err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
orig_dev, vlan);
if (!err)
@@ -1259,9 +1225,6 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (WARN_ON(!bridge_port))
return -EINVAL;
@@ -1269,17 +1232,9 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
- vid, flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
+ vlan->vid, flag_untagged,
+ flag_pvid, extack);
}
static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
@@ -1716,8 +1671,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_mdb *mdb)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -1729,9 +1683,6 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index;
int err = 0;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
if (!bridge_port)
return 0;
@@ -1813,7 +1764,6 @@ mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -1823,22 +1773,19 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
- err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
- extack);
- if (switchdev_trans_ph_prepare(trans)) {
- /* The event is emitted before the changes are actually
- * applied to the bridge. Therefore schedule the respin
- * call for later, so that the respin logic sees the
- * updated bridge state.
- */
- mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
- }
+ err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);
+
+ /* The event is emitted before the changes are actually
+ * applied to the bridge. Therefore schedule the respin
+ * call for later, so that the respin logic sees the
+ * updated bridge state.
+ */
+ mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
- SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_MDB(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -1873,7 +1820,6 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *orig_dev = vlan->obj.orig_dev;
struct mlxsw_sp_bridge_port *bridge_port;
- u16 vid;
if (netif_is_bridge_master(orig_dev))
return -EOPNOTSUPP;
@@ -1885,8 +1831,7 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
if (!bridge_port->bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
+ mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
return 0;
}
@@ -3406,12 +3351,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct switchdev_trans *trans = port_obj_info->trans;
struct mlxsw_sp_bridge_device *bridge_device;
struct netlink_ext_ack *extack;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
br_dev = netdev_master_upper_dev_get(vxlan_dev);
@@ -3424,9 +3367,6 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
port_obj_info->handled = true;
- if (switchdev_trans_ph_commit(trans))
- return 0;
-
bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
if (!bridge_device)
return -EINVAL;
@@ -3434,18 +3374,10 @@ mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return 0;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
- vxlan_dev, vid,
- flag_untagged,
- flag_pvid, extack);
- if (err)
- return err;
- }
-
- return 0;
+ return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
+ vxlan_dev, vlan->vid,
+ flag_untagged,
+ flag_pvid, extack);
}
static void
@@ -3458,7 +3390,6 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
struct mlxsw_sp_bridge_device *bridge_device;
struct mlxsw_sp *mlxsw_sp;
struct net_device *br_dev;
- u16 vid;
br_dev = netdev_master_upper_dev_get(vxlan_dev);
if (!br_dev)
@@ -3477,9 +3408,8 @@ mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
if (!bridge_device->vlan_enabled)
return;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
- mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
- vxlan_dev, vid);
+ mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
+ vlan->vid);
}
static int
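
Editor's note: the bridge-flags conversion above relies on struct switchdev_brport_flags carrying both a mask of the attributes being changed and their values, replacing the old transaction phases. A minimal hypothetical handler using the same mask/val pattern (the port structure and function names are illustrative, not from the driver):

#include <linux/if_bridge.h>
#include <net/switchdev.h>

/* Hypothetical per-port state; a real driver would program hardware. */
struct example_port {
	bool learning;
	bool flood;
};

static int example_pre_bridge_flags(struct switchdev_brport_flags flags)
{
	/* Reject any flag the (hypothetical) hardware cannot offload. */
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD))
		return -EINVAL;
	return 0;
}

static int example_bridge_flags(struct example_port *port,
				struct switchdev_brport_flags flags)
{
	/* Only touch the attributes the caller actually changed:
	 * flags.mask says which bits are valid, flags.val their value.
	 */
	if (flags.mask & BR_LEARNING)
		port->learning = flags.val & BR_LEARNING;
	if (flags.mask & BR_FLOOD)
		port->flood = flags.val & BR_FLOOD;
	return 0;
}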
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index 42bc014136fe..93df3049cdc0 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -31,6 +31,8 @@ config KS8851
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
SPI driver for Micrel KS8851 SPI attached network chip.
@@ -40,6 +42,8 @@ config KS8851_MLL
select MII
select CRC32
select EEPROM_93CX6
+ select PHYLIB
+ select MICREL_PHY
help
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 2b319e451121..e2eb0caeac82 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -358,6 +358,7 @@ union ks8851_tx_hdr {
* @vdd_reg: Optional regulator supplying the chip
* @vdd_io: Optional digital power supply for IO
* @gpio: Optional reset_n gpio
+ * @mii_bus: Pointer to MII bus structure
* @lock: Bus access lock callback
* @unlock: Bus access unlock callback
* @rdreg16: 16bit register read callback
@@ -403,6 +404,7 @@ struct ks8851_net {
struct regulator *vdd_reg;
struct regulator *vdd_io;
int gpio;
+ struct mii_bus *mii_bus;
void (*lock)(struct ks8851_net *ks,
unsigned long *flags);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 6fc7483aea03..2feed6ce19d3 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -23,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "ks8851.h"
@@ -932,7 +931,25 @@ static int ks8851_phy_reg(int reg)
return KS_P1ANLPR;
}
- return 0x0;
+ return -EOPNOTSUPP;
+}
+
+static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ unsigned long flags;
+ int result;
+ int ksreg;
+
+ ksreg = ks8851_phy_reg(reg);
+ if (ksreg < 0)
+ return ksreg;
+
+ ks8851_lock(ks, &flags);
+ result = ks8851_rdreg16(ks, ksreg);
+ ks8851_unlock(ks, &flags);
+
+ return result;
}
/**
@@ -952,20 +969,13 @@ static int ks8851_phy_reg(int reg)
*/
static int ks8851_phy_read(struct net_device *dev, int phy_addr, int reg)
{
- struct ks8851_net *ks = netdev_priv(dev);
- unsigned long flags;
- int ksreg;
- int result;
+ int ret;
- ksreg = ks8851_phy_reg(reg);
- if (!ksreg)
+ ret = ks8851_phy_read_common(dev, phy_addr, reg);
+ if (ret < 0)
return 0x0; /* no error return allowed, so use zero */
- ks8851_lock(ks, &flags);
- result = ks8851_rdreg16(ks, ksreg);
- ks8851_unlock(ks, &flags);
-
- return result;
+ return ret;
}
static void ks8851_phy_write(struct net_device *dev,
@@ -976,13 +986,37 @@ static void ks8851_phy_write(struct net_device *dev,
int ksreg;
ksreg = ks8851_phy_reg(reg);
- if (ksreg) {
+ if (ksreg >= 0) {
ks8851_lock(ks, &flags);
ks8851_wrreg16(ks, ksreg, value);
ks8851_unlock(ks, &flags);
}
}
+static int ks8851_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ if (phy_id != 0)
+ return -EOPNOTSUPP;
+
+ /* KS8851 PHY ID registers are swapped in HW, swap them back. */
+ if (reg == MII_PHYSID1)
+ reg = MII_PHYSID2;
+ else if (reg == MII_PHYSID2)
+ reg = MII_PHYSID1;
+
+ return ks8851_phy_read_common(ks->netdev, phy_id, reg);
+}
+
+static int ks8851_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+{
+ struct ks8851_net *ks = bus->priv;
+
+ ks8851_phy_write(ks->netdev, phy_id, reg, val);
+ return 0;
+}
+
/**
* ks8851_read_selftest - read the selftest memory info.
* @ks: The device state
@@ -1046,6 +1080,42 @@ int ks8851_resume(struct device *dev)
}
#endif
+static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "ks8851_eth_mii";
+ mii_bus->read = ks8851_mdio_read;
+ mii_bus->write = ks8851_mdio_write;
+ mii_bus->priv = ks;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)BIT(0));
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = mdiobus_register(mii_bus);
+ if (ret)
+ goto err_mdiobus_register;
+
+ ks->mii_bus = mii_bus;
+
+ return 0;
+
+err_mdiobus_register:
+ mdiobus_free(mii_bus);
+ return ret;
+}
+
+static void ks8851_unregister_mdiobus(struct ks8851_net *ks)
+{
+ mdiobus_unregister(ks->mii_bus);
+ mdiobus_free(ks->mii_bus);
+}
+
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en)
{
@@ -1104,6 +1174,8 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
INIT_WORK(&ks->rxctrl_work, ks8851_rxctrl_work);
+ SET_NETDEV_DEV(netdev, dev);
+
/* setup EEPROM state */
ks->eeprom.data = ks;
ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
@@ -1120,6 +1192,10 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
dev_info(dev, "message enable is %d\n", msg_en);
+ ret = ks8851_register_mdiobus(ks, dev);
+ if (ret)
+ goto err_mdio;
+
/* set the default message enable */
ks->msg_enable = netif_msg_init(msg_en, NETIF_MSG_DRV |
NETIF_MSG_PROBE |
@@ -1128,7 +1204,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
skb_queue_head_init(&ks->txq);
netdev->ethtool_ops = &ks8851_ethtool_ops;
- SET_NETDEV_DEV(netdev, dev);
dev_set_drvdata(dev, ks);
@@ -1156,7 +1231,7 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
ret = register_netdev(netdev);
if (ret) {
dev_err(dev, "failed to register network device\n");
- goto err_netdev;
+ goto err_id;
}
netdev_info(netdev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
@@ -1165,8 +1240,9 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
return 0;
-err_netdev:
err_id:
+ ks8851_unregister_mdiobus(ks);
+err_mdio:
if (gpio_is_valid(gpio))
gpio_set_value(gpio, 0);
regulator_disable(ks->vdd_reg);
@@ -1180,6 +1256,8 @@ int ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
+ ks8851_unregister_mdiobus(priv);
+
if (netif_msg_drv(priv))
dev_info(dev, "remove\n");
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 3bab0cb2b1a5..2e8fcce50f9d 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 4ec7f1615977..479406ecbaa3 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -8,8 +8,6 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#define DEBUG
-
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 3804310c853a..dbdfabff3b00 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1253,7 +1253,7 @@ static void lan743x_tx_release_desc(struct lan743x_tx *tx,
if (!(buffer_info->flags & TX_BUFFER_INFO_FLAG_ACTIVE))
goto done;
- descriptor_type = (descriptor->data0) &
+ descriptor_type = le32_to_cpu(descriptor->data0) &
TX_DESC_DATA0_DTYPE_MASK_;
if (descriptor_type == TX_DESC_DATA0_DTYPE_DATA_)
goto clean_up_data_descriptor;
@@ -1313,7 +1313,7 @@ static int lan743x_tx_next_index(struct lan743x_tx *tx, int index)
static void lan743x_tx_release_completed_descriptors(struct lan743x_tx *tx)
{
- while ((*tx->head_cpu_ptr) != (tx->last_head)) {
+ while (le32_to_cpu(*tx->head_cpu_ptr) != (tx->last_head)) {
lan743x_tx_release_desc(tx, tx->last_head, false);
tx->last_head = lan743x_tx_next_index(tx, tx->last_head);
}
@@ -1399,10 +1399,10 @@ static int lan743x_tx_frame_start(struct lan743x_tx *tx,
if (dma_mapping_error(dev, dma_ptr))
return -ENOMEM;
- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
- tx_descriptor->data3 = (frame_length << 16) &
- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
buffer_info->skb = NULL;
buffer_info->dma_ptr = dma_ptr;
@@ -1443,7 +1443,7 @@ static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
tx->frame_data0 |= TX_DESC_DATA0_IOC_;
}
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
/* move to next descriptor */
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
@@ -1487,7 +1487,7 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
/* wrap up previous descriptor */
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
/* move to next descriptor */
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
@@ -1513,10 +1513,10 @@ static int lan743x_tx_frame_add_fragment(struct lan743x_tx *tx,
return -ENOMEM;
}
- tx_descriptor->data1 = DMA_ADDR_LOW32(dma_ptr);
- tx_descriptor->data2 = DMA_ADDR_HIGH32(dma_ptr);
- tx_descriptor->data3 = (frame_length << 16) &
- TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_;
+ tx_descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(dma_ptr));
+ tx_descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(dma_ptr));
+ tx_descriptor->data3 = cpu_to_le32((frame_length << 16) &
+ TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_);
buffer_info->skb = NULL;
buffer_info->dma_ptr = dma_ptr;
@@ -1560,7 +1560,7 @@ static void lan743x_tx_frame_end(struct lan743x_tx *tx,
if (ignore_sync)
buffer_info->flags |= TX_BUFFER_INFO_FLAG_IGNORE_SYNC;
- tx_descriptor->data0 = tx->frame_data0;
+ tx_descriptor->data0 = cpu_to_le32(tx->frame_data0);
tx->frame_tail = lan743x_tx_next_index(tx, tx->frame_tail);
tx->last_tail = tx->frame_tail;
@@ -1926,15 +1926,6 @@ static int lan743x_rx_next_index(struct lan743x_rx *rx, int index)
return ((++index) % rx->ring_size);
}
-static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
-{
- int length = 0;
-
- length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
- return __netdev_alloc_skb(rx->adapter->netdev,
- length, GFP_ATOMIC | GFP_DMA);
-}
-
static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
{
/* update the tail once per 8 descriptors */
@@ -1943,36 +1934,57 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
index);
}
-static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
- struct sk_buff *skb)
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
{
+ struct net_device *netdev = rx->adapter->netdev;
+ struct device *dev = &rx->adapter->pdev->dev;
struct lan743x_rx_buffer_info *buffer_info;
+ unsigned int buffer_length, used_length;
struct lan743x_rx_descriptor *descriptor;
- int length = 0;
+ struct sk_buff *skb;
+ dma_addr_t dma_ptr;
+
+ buffer_length = netdev->mtu + ETH_HLEN + 4 + RX_HEAD_PADDING;
- length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- buffer_info->skb = skb;
- if (!(buffer_info->skb))
+ skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
+ if (!skb)
return -ENOMEM;
- buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
- buffer_info->skb->data,
- length,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&rx->adapter->pdev->dev,
- buffer_info->dma_ptr)) {
- buffer_info->dma_ptr = 0;
+ dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_ptr)) {
+ dev_kfree_skb_any(skb);
return -ENOMEM;
}
+ if (buffer_info->dma_ptr) {
+ /* sync used area of buffer only */
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_)
+ /* frame length is valid only if LS bit is set.
+ * it's a safe upper bound for the used area in this
+ * buffer.
+ */
+ used_length = min(RX_DESC_DATA0_FRAME_LENGTH_GET_
+ (le32_to_cpu(descriptor->data0)),
+ buffer_info->buffer_length);
+ else
+ used_length = buffer_info->buffer_length;
+ dma_sync_single_for_cpu(dev, buffer_info->dma_ptr,
+ used_length,
+ DMA_FROM_DEVICE);
+ dma_unmap_single_attrs(dev, buffer_info->dma_ptr,
+ buffer_info->buffer_length,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
- buffer_info->buffer_length = length;
- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
+ buffer_info->skb = skb;
+ buffer_info->dma_ptr = dma_ptr;
+ buffer_info->buffer_length = buffer_length;
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
descriptor->data3 = 0;
- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
- (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
- skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
+ (buffer_length & RX_DESC_DATA0_BUF_LENGTH_MASK_)));
lan743x_rx_update_tail(rx, index);
return 0;
@@ -1986,12 +1998,12 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- descriptor->data1 = DMA_ADDR_LOW32(buffer_info->dma_ptr);
- descriptor->data2 = DMA_ADDR_HIGH32(buffer_info->dma_ptr);
+ descriptor->data1 = cpu_to_le32(DMA_ADDR_LOW32(buffer_info->dma_ptr));
+ descriptor->data2 = cpu_to_le32(DMA_ADDR_HIGH32(buffer_info->dma_ptr));
descriptor->data3 = 0;
- descriptor->data0 = (RX_DESC_DATA0_OWN_ |
+ descriptor->data0 = cpu_to_le32((RX_DESC_DATA0_OWN_ |
((buffer_info->buffer_length) &
- RX_DESC_DATA0_BUF_LENGTH_MASK_));
+ RX_DESC_DATA0_BUF_LENGTH_MASK_)));
lan743x_rx_update_tail(rx, index);
}
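
Editor's note: the lan743x hunks annotate descriptor fields as little-endian and convert on every access with cpu_to_le32()/le32_to_cpu(). A reduced sketch of that pattern, using a hypothetical descriptor layout rather than the driver's real one:

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>

/* Hypothetical DMA descriptor: the hardware expects little-endian
 * fields, so they are declared __le32 and converted on each access.
 */
struct example_desc {
	__le32 data0;
	__le32 addr_lo;
	__le32 addr_hi;
};

#define EXAMPLE_DESC_OWN	BIT(31)
#define EXAMPLE_DESC_LEN_MASK	GENMASK(13, 0)

static void example_desc_fill(struct example_desc *desc, dma_addr_t dma,
			      u32 len)
{
	desc->addr_lo = cpu_to_le32(lower_32_bits(dma));
	desc->addr_hi = cpu_to_le32(upper_32_bits(dma));
	/* Hand the descriptor to hardware last. */
	desc->data0 = cpu_to_le32(EXAMPLE_DESC_OWN |
				  (len & EXAMPLE_DESC_LEN_MASK));
}

static bool example_desc_done(const struct example_desc *desc)
{
	/* Convert back to CPU order before testing flag bits. */
	return !(le32_to_cpu(desc->data0) & EXAMPLE_DESC_OWN);
}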
@@ -2021,16 +2033,32 @@ static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
memset(buffer_info, 0, sizeof(*buffer_info));
}
-static int lan743x_rx_process_packet(struct lan743x_rx *rx)
+static struct sk_buff *
+lan743x_rx_trim_skb(struct sk_buff *skb, int frame_length)
+{
+ if (skb_linearize(skb)) {
+ dev_kfree_skb_irq(skb);
+ return NULL;
+ }
+ frame_length = max_t(int, 0, frame_length - RX_HEAD_PADDING - 2);
+ if (skb->len > frame_length) {
+ skb->tail -= skb->len - frame_length;
+ skb->len = frame_length;
+ }
+ return skb;
+}
+
+static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
{
- struct skb_shared_hwtstamps *hwtstamps = NULL;
+ int current_head_index = le32_to_cpu(*rx->head_cpu_ptr);
+ struct lan743x_rx_descriptor *descriptor, *desc_ext;
+ struct net_device *netdev = rx->adapter->netdev;
int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
- int current_head_index = *rx->head_cpu_ptr;
struct lan743x_rx_buffer_info *buffer_info;
- struct lan743x_rx_descriptor *descriptor;
+ int frame_length, buffer_length;
int extension_index = -1;
- int first_index = -1;
- int last_index = -1;
+ bool is_last, is_first;
+ struct sk_buff *skb;
if (current_head_index < 0 || current_head_index >= rx->ring_size)
goto done;
@@ -2038,163 +2066,120 @@ static int lan743x_rx_process_packet(struct lan743x_rx *rx)
if (rx->last_head < 0 || rx->last_head >= rx->ring_size)
goto done;
- if (rx->last_head != current_head_index) {
- descriptor = &rx->ring_cpu_ptr[rx->last_head];
- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
- goto done;
+ if (rx->last_head == current_head_index)
+ goto done;
- if (!(descriptor->data0 & RX_DESC_DATA0_FS_))
- goto done;
+ descriptor = &rx->ring_cpu_ptr[rx->last_head];
+ if (le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_OWN_)
+ goto done;
+ buffer_info = &rx->buffer_info[rx->last_head];
- first_index = rx->last_head;
- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
- last_index = rx->last_head;
- } else {
- int index;
-
- index = lan743x_rx_next_index(rx, first_index);
- while (index != current_head_index) {
- descriptor = &rx->ring_cpu_ptr[index];
- if (descriptor->data0 & RX_DESC_DATA0_OWN_)
- goto done;
-
- if (descriptor->data0 & RX_DESC_DATA0_LS_) {
- last_index = index;
- break;
- }
- index = lan743x_rx_next_index(rx, index);
- }
- }
- if (last_index >= 0) {
- descriptor = &rx->ring_cpu_ptr[last_index];
- if (descriptor->data0 & RX_DESC_DATA0_EXT_) {
- /* extension is expected to follow */
- int index = lan743x_rx_next_index(rx,
- last_index);
- if (index != current_head_index) {
- descriptor = &rx->ring_cpu_ptr[index];
- if (descriptor->data0 &
- RX_DESC_DATA0_OWN_) {
- goto done;
- }
- if (descriptor->data0 &
- RX_DESC_DATA0_EXT_) {
- extension_index = index;
- } else {
- goto done;
- }
- } else {
- /* extension is not yet available */
- /* prevent processing of this packet */
- first_index = -1;
- last_index = -1;
- }
- }
- }
- }
- if (first_index >= 0 && last_index >= 0) {
- int real_last_index = last_index;
- struct sk_buff *skb = NULL;
- u32 ts_sec = 0;
- u32 ts_nsec = 0;
-
- /* packet is available */
- if (first_index == last_index) {
- /* single buffer packet */
- struct sk_buff *new_skb = NULL;
- int packet_length;
-
- new_skb = lan743x_rx_allocate_skb(rx);
- if (!new_skb) {
- /* failed to allocate next skb.
- * Memory is very low.
- * Drop this packet and reuse buffer.
- */
- lan743x_rx_reuse_ring_element(rx, first_index);
- goto process_extension;
- }
+ is_last = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_LS_;
+ is_first = le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_FS_;
- buffer_info = &rx->buffer_info[first_index];
- skb = buffer_info->skb;
- descriptor = &rx->ring_cpu_ptr[first_index];
-
- /* unmap from dma */
- if (buffer_info->dma_ptr) {
- dma_unmap_single(&rx->adapter->pdev->dev,
- buffer_info->dma_ptr,
- buffer_info->buffer_length,
- DMA_FROM_DEVICE);
- buffer_info->dma_ptr = 0;
- buffer_info->buffer_length = 0;
- }
- buffer_info->skb = NULL;
- packet_length = RX_DESC_DATA0_FRAME_LENGTH_GET_
- (descriptor->data0);
- skb_put(skb, packet_length - 4);
- skb->protocol = eth_type_trans(skb,
- rx->adapter->netdev);
- lan743x_rx_init_ring_element(rx, first_index, new_skb);
- } else {
- int index = first_index;
+ if (is_last && le32_to_cpu(descriptor->data0) & RX_DESC_DATA0_EXT_) {
+ /* extension is expected to follow */
+ int index = lan743x_rx_next_index(rx, rx->last_head);
- /* multi buffer packet not supported */
- /* this should not happen since
- * buffers are allocated to be at least jumbo size
- */
+ if (index == current_head_index)
+ /* extension not yet available */
+ goto done;
+ desc_ext = &rx->ring_cpu_ptr[index];
+ if (le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_OWN_)
+ /* extension not yet available */
+ goto done;
+ if (!(le32_to_cpu(desc_ext->data0) & RX_DESC_DATA0_EXT_))
+ goto move_forward;
+ extension_index = index;
+ }
- /* clean up buffers */
- if (first_index <= last_index) {
- while ((index >= first_index) &&
- (index <= last_index)) {
- lan743x_rx_reuse_ring_element(rx,
- index);
- index = lan743x_rx_next_index(rx,
- index);
- }
- } else {
- while ((index >= first_index) ||
- (index <= last_index)) {
- lan743x_rx_reuse_ring_element(rx,
- index);
- index = lan743x_rx_next_index(rx,
- index);
- }
- }
- }
+ /* Only the last buffer in a multi-buffer frame contains the total frame
+ * length. The chip occasionally sends more buffers than strictly
+ * required to reach the total frame length.
+ * Handle this by adding all buffers to the skb in their entirety.
+ * Once the real frame length is known, trim the skb.
+ */
+ frame_length =
+ RX_DESC_DATA0_FRAME_LENGTH_GET_(le32_to_cpu(descriptor->data0));
+ buffer_length = buffer_info->buffer_length;
+
+ netdev_dbg(netdev, "%s%schunk: %d/%d",
+ is_first ? "first " : " ",
+ is_last ? "last " : " ",
+ frame_length, buffer_length);
+
+ /* save existing skb, allocate new skb and map to dma */
+ skb = buffer_info->skb;
+ if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
+ /* failed to allocate next skb.
+ * Memory is very low.
+ * Drop this packet and reuse buffer.
+ */
+ lan743x_rx_reuse_ring_element(rx, rx->last_head);
+ /* drop packet that was being assembled */
+ dev_kfree_skb_irq(rx->skb_head);
+ rx->skb_head = NULL;
+ goto process_extension;
+ }
+
+ /* add buffers to skb via skb->frag_list */
+ if (is_first) {
+ skb_reserve(skb, RX_HEAD_PADDING);
+ skb_put(skb, buffer_length - RX_HEAD_PADDING);
+ if (rx->skb_head)
+ dev_kfree_skb_irq(rx->skb_head);
+ rx->skb_head = skb;
+ } else if (rx->skb_head) {
+ skb_put(skb, buffer_length);
+ if (skb_shinfo(rx->skb_head)->frag_list)
+ rx->skb_tail->next = skb;
+ else
+ skb_shinfo(rx->skb_head)->frag_list = skb;
+ rx->skb_tail = skb;
+ rx->skb_head->len += skb->len;
+ rx->skb_head->data_len += skb->len;
+ rx->skb_head->truesize += skb->truesize;
+ } else {
+ /* packet to assemble has already been dropped because one or
+ * more of its buffers could not be allocated
+ */
+ netdev_dbg(netdev, "drop buffer intended for dropped packet");
+ dev_kfree_skb_irq(skb);
+ }
process_extension:
- if (extension_index >= 0) {
- descriptor = &rx->ring_cpu_ptr[extension_index];
- buffer_info = &rx->buffer_info[extension_index];
-
- ts_sec = descriptor->data1;
- ts_nsec = (descriptor->data2 &
- RX_DESC_DATA2_TS_NS_MASK_);
- lan743x_rx_reuse_ring_element(rx, extension_index);
- real_last_index = extension_index;
- }
+ if (extension_index >= 0) {
+ u32 ts_sec;
+ u32 ts_nsec;
- if (!skb) {
- result = RX_PROCESS_RESULT_PACKET_DROPPED;
- goto move_forward;
- }
+ ts_sec = le32_to_cpu(desc_ext->data1);
+ ts_nsec = (le32_to_cpu(desc_ext->data2) &
+ RX_DESC_DATA2_TS_NS_MASK_);
+ if (rx->skb_head)
+ skb_hwtstamps(rx->skb_head)->hwtstamp =
+ ktime_set(ts_sec, ts_nsec);
+ lan743x_rx_reuse_ring_element(rx, extension_index);
+ rx->last_head = extension_index;
+ netdev_dbg(netdev, "process extension");
+ }
- if (extension_index < 0)
- goto pass_packet_to_os;
- hwtstamps = skb_hwtstamps(skb);
- if (hwtstamps)
- hwtstamps->hwtstamp = ktime_set(ts_sec, ts_nsec);
+ if (is_last && rx->skb_head)
+ rx->skb_head = lan743x_rx_trim_skb(rx->skb_head, frame_length);
-pass_packet_to_os:
- /* pass packet to OS */
- napi_gro_receive(&rx->napi, skb);
- result = RX_PROCESS_RESULT_PACKET_RECEIVED;
+ if (is_last && rx->skb_head) {
+ rx->skb_head->protocol = eth_type_trans(rx->skb_head,
+ rx->adapter->netdev);
+ netdev_dbg(netdev, "sending %d byte frame to OS",
+ rx->skb_head->len);
+ napi_gro_receive(&rx->napi, rx->skb_head);
+ rx->skb_head = NULL;
+ }
move_forward:
- /* push tail and head forward */
- rx->last_tail = real_last_index;
- rx->last_head = lan743x_rx_next_index(rx, real_last_index);
- }
+ /* push tail and head forward */
+ rx->last_tail = rx->last_head;
+ rx->last_head = lan743x_rx_next_index(rx, rx->last_head);
+ result = RX_PROCESS_RESULT_BUFFER_RECEIVED;
done:
return result;
}
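The comment in the reworked receive path above explains the approach: every buffer is appended to the head skb in full (via frag_list for the non-first ones), and the head is trimmed only once the last descriptor reports the true frame length. A stand-alone model of just that length bookkeeping, with made-up buffer sizes and model_* names that are not driver API:

#include <stdio.h>

/* Model of the chained-buffer accounting in lan743x_rx_process_buffer() and
 * lan743x_rx_trim_skb(): add each buffer in full, then trim to the frame
 * length reported by the last descriptor. Constants are illustrative only.
 */
#define MODEL_RX_HEAD_PADDING	2	/* NET_IP_ALIGN on most platforms */
#define MODEL_BUF_LEN		1536

static int model_trim(int assembled_len, int frame_length)
{
	int want = frame_length - MODEL_RX_HEAD_PADDING - 2;

	if (want < 0)
		want = 0;
	return assembled_len > want ? want : assembled_len;
}

int main(void)
{
	/* A 4000-byte frame delivered in three full buffers plus padding. */
	int assembled = 3 * MODEL_BUF_LEN;	/* bytes queued on the skb */
	int frame_length = 4000;		/* from the last descriptor */

	printf("assembled %d bytes, trimmed to %d\n",
	       assembled, model_trim(assembled, frame_length));
	return 0;
}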
@@ -2213,12 +2198,12 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
DMAC_INT_BIT_RXFRM_(rx->channel_number));
}
for (count = 0; count < weight; count++) {
- result = lan743x_rx_process_packet(rx);
+ result = lan743x_rx_process_buffer(rx);
if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
break;
}
rx->frame_count += count;
- if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED)
+ if (count == weight || result == RX_PROCESS_RESULT_BUFFER_RECEIVED)
return weight;
if (!napi_complete_done(napi, count))
@@ -2330,9 +2315,7 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
-
- ret = lan743x_rx_init_ring_element(rx, index, new_skb);
+ ret = lan743x_rx_init_ring_element(rx, index);
if (ret)
goto cleanup;
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 404af3f4635e..6080028c1df2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -661,7 +661,7 @@ struct lan743x_tx {
struct lan743x_tx_buffer_info *buffer_info;
- u32 *head_cpu_ptr;
+ __le32 *head_cpu_ptr;
dma_addr_t head_dma_ptr;
int last_head;
int last_tail;
@@ -691,7 +691,7 @@ struct lan743x_rx {
struct lan743x_rx_buffer_info *buffer_info;
- u32 *head_cpu_ptr;
+ __le32 *head_cpu_ptr;
dma_addr_t head_dma_ptr;
u32 last_head;
u32 last_tail;
@@ -699,6 +699,8 @@ struct lan743x_rx {
struct napi_struct napi;
u32 frame_count;
+
+ struct sk_buff *skb_head, *skb_tail;
};
struct lan743x_adapter {
@@ -775,10 +777,10 @@ struct lan743x_adapter {
#define TX_DESC_DATA3_FRAME_LENGTH_MSS_MASK_ (0x3FFF0000)
struct lan743x_tx_descriptor {
- u32 data0;
- u32 data1;
- u32 data2;
- u32 data3;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define TX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
@@ -813,10 +815,10 @@ struct lan743x_tx_buffer_info {
#define RX_HEAD_PADDING NET_IP_ALIGN
struct lan743x_rx_descriptor {
- u32 data0;
- u32 data1;
- u32 data2;
- u32 data3;
+ __le32 data0;
+ __le32 data1;
+ __le32 data2;
+ __le32 data3;
} __aligned(DEFAULT_DMA_DESCRIPTOR_SPACING);
#define RX_BUFFER_INFO_FLAG_ACTIVE BIT(0)
@@ -831,8 +833,7 @@ struct lan743x_rx_buffer_info {
#define LAN743X_RX_RING_SIZE (65)
#define RX_PROCESS_RESULT_NOTHING_TO_DO (0)
-#define RX_PROCESS_RESULT_PACKET_RECEIVED (1)
-#define RX_PROCESS_RESULT_PACKET_DROPPED (2)
+#define RX_PROCESS_RESULT_BUFFER_RECEIVED (1)
u32 lan743x_csr_read(struct lan743x_adapter *adapter, int offset);
void lan743x_csr_write(struct lan743x_adapter *adapter, int offset, u32 data);
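The __le32 annotations above make the descriptor byte order explicit: the hardware always reads the ring as little-endian, so stores go through cpu_to_le32() and loads through le32_to_cpu(). A minimal userspace analogue, using htole32()/le32toh() from endian.h in place of the kernel helpers and a purely illustrative OWN bit position:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for a little-endian DMA descriptor: the members hold
 * little-endian values regardless of host byte order, mirroring the __le32
 * fields introduced in lan743x_main.h.
 */
struct model_rx_descriptor {
	uint32_t data0;		/* little-endian on the wire */
	uint32_t data1;
	uint32_t data2;
	uint32_t data3;
};

#define MODEL_DESC_OWN	(1u << 15)	/* illustrative bit position only */

int main(void)
{
	struct model_rx_descriptor desc;
	uint64_t dma = 0x12345678abcdull;

	/* Write path: convert CPU-order values before the device sees them. */
	desc.data1 = htole32((uint32_t)dma);		/* low 32 bits  */
	desc.data2 = htole32((uint32_t)(dma >> 32));	/* high 32 bits */
	desc.data0 = htole32(MODEL_DESC_OWN | 1536);

	/* Read path: convert back before testing bits on the CPU. */
	if (le32toh(desc.data0) & MODEL_DESC_OWN)
		printf("descriptor still owned by hardware, low dma %08x\n",
		       (unsigned int)le32toh(desc.data1));
	return 0;
}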
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index ee7bb7e24e8e..c0ede0ca7115 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -14,6 +14,7 @@ if NET_VENDOR_MICROSEMI
# Users should depend on NET_SWITCHDEV, HAS_IOMEM
config MSCC_OCELOT_SWITCH_LIB
select REGMAP_MMIO
+ select PACKING
select PHYLIB
tristate
help
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 58f94c3d80f9..722c27694b21 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -6,7 +6,9 @@ mscc_ocelot_switch_lib-y := \
ocelot_police.o \
ocelot_vcap.o \
ocelot_flower.o \
- ocelot_ptp.o
+ ocelot_ptp.o \
+ ocelot_devlink.o
+mscc_ocelot_switch_lib-$(CONFIG_BRIDGE_MRP) += ocelot_mrp.o
obj-$(CONFIG_MSCC_OCELOT_SWITCH) += mscc_ocelot.o
mscc_ocelot-y := \
ocelot_vsc7514.o \
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ff87a0bc089c..46e5c9136bac 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2017 Microsemi Corporation
*/
+#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
@@ -221,25 +222,20 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware, struct switchdev_trans *trans)
+ bool vlan_aware)
{
+ struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_vcap_filter *filter;
u32 val;
- if (switchdev_trans_ph_prepare(trans)) {
- struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
- struct ocelot_vcap_filter *filter;
-
- list_for_each_entry(filter, &block->rules, list) {
- if (filter->ingress_port_mask & BIT(port) &&
- filter->action.vid_replace_ena) {
- dev_err(ocelot->dev,
- "Cannot change VLAN state with vlan modify rules active\n");
- return -EBUSY;
- }
+ list_for_each_entry(filter, &block->rules, list) {
+ if (filter->ingress_port_mask & BIT(port) &&
+ filter->action.vid_replace_ena) {
+ dev_err(ocelot->dev,
+ "Cannot change VLAN state with vlan modify rules active\n");
+ return -EBUSY;
}
-
- return 0;
}
ocelot_port->vlan_aware = vlan_aware;
@@ -375,6 +371,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
}
}
+static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+{
+ return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
+}
+
+int ocelot_port_flush(struct ocelot *ocelot, int port)
+{
+ int err, val;
+
+ /* Disable dequeuing from the egress queues */
+ ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE, port);
+
+ /* Disable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+
+ /* Disable priority flow control */
+ ocelot_fields_write(ocelot, port,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
+
+ /* Wait at least the time it takes to receive a frame of maximum length
+ * at the port.
+ * Worst-case delays for 10 kilobyte jumbo frames are:
+ * 8 ms on a 10M port
+ * 800 μs on a 100M port
+ * 80 μs on a 1G port
+ * 32 μs on a 2.5G port
+ */
+ usleep_range(8000, 10000);
+
+ /* Disable half duplex backpressure. */
+ ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
+ SYS_FRONT_PORT_MODE, port);
+
+ /* Flush the queues associated with the port. */
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
+ REW_PORT_CFG, port);
+
+ /* Enable dequeuing from the egress queues. */
+ ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
+ port);
+
+ /* Wait until flushing is complete. */
+ err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
+ 100, 2000000, false, ocelot, port);
+
+ /* Clear flushing again. */
+ ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+
+ return err;
+}
+EXPORT_SYMBOL(ocelot_port_flush);
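The fixed usleep_range(8000, 10000) above is simply the 10M-port worst case from the comment; the quoted delays all follow from frame bits divided by line rate. A small check of that arithmetic (helper name invented for the sketch):

#include <stdio.h>

/* Worst-case time to drain one maximum-length frame already in flight:
 * frame bytes * 8 bits / line rate. Reproduces the figures quoted in
 * ocelot_port_flush() for a 10 KB jumbo frame.
 */
static unsigned int drain_time_us(unsigned int frame_bytes,
				  unsigned int rate_mbps)
{
	return frame_bytes * 8 / rate_mbps;	/* Mbit/s == bit/us */
}

int main(void)
{
	const unsigned int rates[] = { 10, 100, 1000, 2500 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%4u Mbps: %5u us\n", rates[i],
		       drain_time_us(10000, rates[i]));
	return 0;
}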
+
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev)
{
@@ -579,6 +629,229 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
+static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
+ u32 *rval)
+{
+ u32 bytes_valid, val;
+
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_NOT_READY) {
+ if (ifh)
+ return -EIO;
+
+ do {
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ } while (val == XTR_NOT_READY);
+ }
+
+ switch (val) {
+ case XTR_ABORT:
+ return -EIO;
+ case XTR_EOF_0:
+ case XTR_EOF_1:
+ case XTR_EOF_2:
+ case XTR_EOF_3:
+ case XTR_PRUNED:
+ bytes_valid = XTR_VALID_BYTES(val);
+ val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ if (val == XTR_ESCAPE)
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+ else
+ *rval = val;
+
+ return bytes_valid;
+ case XTR_ESCAPE:
+ *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+
+ return 4;
+ default:
+ *rval = val;
+
+ return 4;
+ }
+}
+
+static int ocelot_xtr_poll_xfh(struct ocelot *ocelot, int grp, u32 *xfh)
+{
+ int i, err = 0;
+
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
+ err = ocelot_rx_frame_word(ocelot, grp, true, &xfh[i]);
+ if (err != 4)
+ return (err < 0) ? err : -EIO;
+ }
+
+ return 0;
+}
+
+int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
+{
+ struct skb_shared_hwtstamps *shhwtstamps;
+ u64 tod_in_ns, full_ts_in_ns, cpuq;
+ u64 timestamp, src_port, len;
+ u32 xfh[OCELOT_TAG_LEN / 4];
+ struct net_device *dev;
+ struct timespec64 ts;
+ struct sk_buff *skb;
+ int sz, buf_len;
+ u32 val, *buf;
+ int err;
+
+ err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
+ if (err)
+ return err;
+
+ ocelot_xfh_get_src_port(xfh, &src_port);
+ ocelot_xfh_get_len(xfh, &len);
+ ocelot_xfh_get_rew_val(xfh, &timestamp);
+ ocelot_xfh_get_cpuq(xfh, &cpuq);
+
+ if (WARN_ON(src_port >= ocelot->num_phys_ports))
+ return -EINVAL;
+
+ dev = ocelot->ops->port_to_netdev(ocelot, src_port);
+ if (!dev)
+ return -EINVAL;
+
+ skb = netdev_alloc_skb(dev, len);
+ if (unlikely(!skb)) {
+ netdev_err(dev, "Unable to allocate sk_buff\n");
+ return -ENOMEM;
+ }
+
+ buf_len = len - ETH_FCS_LEN;
+ buf = (u32 *)skb_put(skb, buf_len);
+
+ len = 0;
+ do {
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ if (sz < 0) {
+ err = sz;
+ goto out_free_skb;
+ }
+ *buf++ = val;
+ len += sz;
+ } while (len < buf_len);
+
+ /* Read the FCS */
+ sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
+ if (sz < 0) {
+ err = sz;
+ goto out_free_skb;
+ }
+
+ /* Update the statistics if part of the FCS was read before */
+ len -= ETH_FCS_LEN - sz;
+
+ if (unlikely(dev->features & NETIF_F_RXFCS)) {
+ buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
+ *buf = val;
+ }
+
+ if (ocelot->ptp) {
+ ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
+
+ tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
+ if ((tod_in_ns & 0xffffffff) < timestamp)
+ full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
+ timestamp;
+ else
+ full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
+ timestamp;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamps->hwtstamp = full_ts_in_ns;
+ }
+
+ /* Everything we see on an interface that is in the HW bridge
+ * has already been forwarded.
+ */
+ if (ocelot->bridge_mask & BIT(src_port))
+ skb->offload_fwd_mark = 1;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+#if IS_ENABLED(CONFIG_BRIDGE_MRP)
+ if (skb->protocol == cpu_to_be16(ETH_P_MRP) &&
+ cpuq & BIT(OCELOT_MRP_CPUQ))
+ skb->offload_fwd_mark = 0;
+#endif
+
+ *nskb = skb;
+
+ return 0;
+
+out_free_skb:
+ kfree_skb(skb);
+ return err;
+}
+EXPORT_SYMBOL(ocelot_xtr_poll_frame);
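The extraction header carries only the low 32 bits of the RX timestamp, so ocelot_xtr_poll_frame() splices them into the current PTP time-of-day and steps the upper half back by one when the low word has already wrapped. A self-contained model of that splice (model_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a full 64-bit nanosecond timestamp from the 32-bit value in the
 * extraction header plus the current time-of-day. If the low word of "now"
 * is already smaller than the captured value, the capture happened before
 * the last 32-bit rollover, so the upper half must be decremented.
 */
static uint64_t model_full_ts(uint64_t tod_in_ns, uint32_t timestamp)
{
	if ((uint32_t)tod_in_ns < timestamp)
		return (((tod_in_ns >> 32) - 1) << 32) | timestamp;

	return (tod_in_ns & 0xffffffff00000000ull) | timestamp;
}

int main(void)
{
	uint64_t tod = 0x0000000500000010ull;
	uint32_t ts = 0xfffffff0;

	/* Low word of tod (0x10) < ts, so the result is 0x00000004fffffff0. */
	printf("full timestamp: %llx\n",
	       (unsigned long long)model_full_ts(tod, ts));
	return 0;
}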
+
+bool ocelot_can_inject(struct ocelot *ocelot, int grp)
+{
+ u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
+
+ if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
+ return false;
+ if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(ocelot_can_inject);
+
+void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
+ u32 rew_op, struct sk_buff *skb)
+{
+ u32 ifh[OCELOT_TAG_LEN / 4] = {0};
+ unsigned int i, count, last;
+
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
+
+ ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_dest(ifh, BIT_ULL(port));
+ ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
+ ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_rew_op(ifh, rew_op);
+
+ for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
+ ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+
+ count = DIV_ROUND_UP(skb->len, 4);
+ last = skb->len % 4;
+ for (i = 0; i < count; i++)
+ ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
+
+ /* Add padding */
+ while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ i++;
+ }
+
+ /* Indicate EOF and valid bytes in last word */
+ ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
+ QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
+ QS_INJ_CTRL_EOF,
+ QS_INJ_CTRL, grp);
+
+ /* Add dummy CRC */
+ ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
+ skb_tx_timestamp(skb);
+
+ skb->dev->stats.tx_packets++;
+ skb->dev->stats.tx_bytes += skb->len;
+}
+EXPORT_SYMBOL(ocelot_port_inject_frame);
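Injection pushes the frame 32 bits at a time, pads short frames up to one buffer cell, and then raises EOF with the number of valid bytes in the last word (zero when the whole frame fits in a single cell). A small model of that word/remainder arithmetic, assuming OCELOT_BUFFER_CELL_SZ is 60 bytes as in the switch headers:

#include <stdio.h>

#define MODEL_CELL_SZ	60	/* assumed OCELOT_BUFFER_CELL_SZ */

/* Word count, padding and "valid bytes in last word", computed the same way
 * as ocelot_port_inject_frame() does before raising EOF.
 */
static void model_inject(unsigned int len)
{
	unsigned int count = (len + 3) / 4;		/* DIV_ROUND_UP(len, 4) */
	unsigned int last = len % 4;
	unsigned int pad = count < MODEL_CELL_SZ / 4 ?
			   MODEL_CELL_SZ / 4 - count : 0;
	unsigned int vld = len < MODEL_CELL_SZ ? 0 : last;

	printf("len %4u: %3u data words, %2u pad words, EOF valid bytes %u\n",
	       len, count, pad, vld);
}

int main(void)
{
	model_inject(42);	/* shorter than one cell, gets padded */
	model_inject(1514);	/* full-size Ethernet frame */
	return 0;
}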
+
+void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
+{
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
+ ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+}
+EXPORT_SYMBOL(ocelot_drain_cpu_queue);
+
int ocelot_fdb_add(struct ocelot *ocelot, int port,
const unsigned char *addr, u16 vid)
{
@@ -894,10 +1167,103 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
}
EXPORT_SYMBOL(ocelot_get_ts_info);
+static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
+ bool only_active_ports)
+{
+ u32 mask = 0;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ if (ocelot_port->bond == bond) {
+ if (only_active_ports && !ocelot_port->lag_tx_active)
+ continue;
+
+ mask |= BIT(port);
+ }
+ }
+
+ return mask;
+}
+
+static u32 ocelot_get_dsa_8021q_cpu_mask(struct ocelot *ocelot)
+{
+ u32 mask = 0;
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port)
+ continue;
+
+ if (ocelot_port->is_dsa_8021q_cpu)
+ mask |= BIT(port);
+ }
+
+ return mask;
+}
+
+void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
+{
+ unsigned long cpu_fwd_mask;
+ int port;
+
+ /* If a DSA tag_8021q CPU exists, it needs to be included in the
+ * regular forwarding path of the front ports regardless of whether
+ * those are bridged or standalone.
+ * If DSA tag_8021q is not used, this returns 0, which is fine because
+ * the hardware-based CPU port module can be a destination for packets
+ * even if it isn't part of PGID_SRC.
+ */
+ cpu_fwd_mask = ocelot_get_dsa_8021q_cpu_mask(ocelot);
+
+ /* Apply FWD mask. The loop is needed to add/remove the current port as
+ * a source for the other ports.
+ */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ unsigned long mask;
+
+ if (!ocelot_port) {
+ /* Unused ports can't send anywhere */
+ mask = 0;
+ } else if (ocelot_port->is_dsa_8021q_cpu) {
+ /* The DSA tag_8021q CPU ports need to be able to
+ * forward packets to all other ports except for
+ * themselves
+ */
+ mask = GENMASK(ocelot->num_phys_ports - 1, 0);
+ mask &= ~cpu_fwd_mask;
+ } else if (ocelot->bridge_fwd_mask & BIT(port)) {
+ struct net_device *bond = ocelot_port->bond;
+
+ mask = ocelot->bridge_fwd_mask & ~BIT(port);
+ if (bond) {
+ mask &= ~ocelot_get_bond_mask(ocelot, bond,
+ false);
+ }
+ } else {
+ /* Standalone ports forward only to DSA tag_8021q CPU
+ * ports (if those exist), or to the hardware CPU port
+ * module otherwise.
+ */
+ mask = cpu_fwd_mask;
+ }
+
+ ocelot_write_rix(ocelot, mask, ANA_PGID_PGID, PGID_SRC + port);
+ }
+}
+EXPORT_SYMBOL(ocelot_apply_bridge_fwd_mask);
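The comment above describes three cases per source port: tag_8021q CPU ports forward everywhere except to each other, bridged ports forward to the other bridged ports minus their own bond, and standalone ports only to the CPU path. A compact userspace model of the per-port PGID_SRC mask for the bridged/standalone cases (the 4-port topology and model_* names are invented for illustration):

#include <stdio.h>

#define MODEL_NUM_PORTS	4

/* Per-port state: member of the bridge, and bond id (or -1 if not bonded). */
struct model_port {
	int bridged;
	int bond;
};

static unsigned int bond_mask(const struct model_port *p, int bond)
{
	unsigned int mask = 0;
	int i;

	for (i = 0; i < MODEL_NUM_PORTS; i++)
		if (p[i].bond == bond)
			mask |= 1u << i;
	return mask;
}

int main(void)
{
	/* Ports 0 and 1 are bridged and bonded together, port 2 is bridged
	 * standalone, port 3 is not bridged at all.
	 */
	struct model_port p[MODEL_NUM_PORTS] = {
		{ 1, 0 }, { 1, 0 }, { 1, -1 }, { 0, -1 },
	};
	unsigned int bridge = 0;
	int port, i;

	for (i = 0; i < MODEL_NUM_PORTS; i++)
		if (p[i].bridged)
			bridge |= 1u << i;

	for (port = 0; port < MODEL_NUM_PORTS; port++) {
		unsigned int mask = 0;

		if (p[port].bridged) {
			mask = bridge & ~(1u << port);
			if (p[port].bond >= 0)
				mask &= ~bond_mask(p, p[port].bond);
		}
		/* Standalone ports get 0 here: only the CPU port module
		 * (not part of this mask) may receive their traffic.
		 */
		printf("PGID_SRC[%d] = 0x%x\n", port, mask);
	}
	return 0;
}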
+
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 port_cfg;
- int p, i;
if (!(BIT(port) & ocelot->bridge_mask))
return;
@@ -909,7 +1275,8 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot->bridge_fwd_mask |= BIT(port);
fallthrough;
case BR_STATE_LEARNING:
- port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
+ if (ocelot_port->learn_ena)
+ port_cfg |= ANA_PORT_PORT_CFG_LEARN_ENA;
break;
default:
@@ -920,32 +1287,7 @@ void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state)
ocelot_write_gix(ocelot, port_cfg, ANA_PORT_PORT_CFG, port);
- /* Apply FWD mask. The loop is needed to add/remove the current port as
- * a source for the other ports.
- */
- for (p = 0; p < ocelot->num_phys_ports; p++) {
- if (ocelot->bridge_fwd_mask & BIT(p)) {
- unsigned long mask = ocelot->bridge_fwd_mask & ~BIT(p);
-
- for (i = 0; i < ocelot->num_phys_ports; i++) {
- unsigned long bond_mask = ocelot->lags[i];
-
- if (!bond_mask)
- continue;
-
- if (bond_mask & BIT(p)) {
- mask &= ~bond_mask;
- break;
- }
- }
-
- ocelot_write_rix(ocelot, mask,
- ANA_PGID_PGID, PGID_SRC + p);
- } else {
- ocelot_write_rix(ocelot, 0,
- ANA_PGID_PGID, PGID_SRC + p);
- }
- }
+ ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_bridge_stp_state_set);
@@ -1192,7 +1534,6 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_vlan pvid = {0}, native_vlan = {0};
- struct switchdev_trans trans;
int ret;
ocelot->bridge_mask &= ~BIT(port);
@@ -1200,13 +1541,7 @@ int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- trans.ph_prepare = true;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
- if (ret)
- return ret;
-
- trans.ph_prepare = false;
- ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ ret = ocelot_port_vlan_filtering(ocelot, port, false);
if (ret)
return ret;
@@ -1219,6 +1554,7 @@ EXPORT_SYMBOL(ocelot_port_bridge_leave);
static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
{
+ unsigned long visited = GENMASK(ocelot->num_phys_ports - 1, 0);
int i, port, lag;
/* Reset destination and aggregation PGIDS */
@@ -1229,22 +1565,40 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
ocelot_write_rix(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0),
ANA_PGID_PGID, i);
- /* Now, set PGIDs for each LAG */
+ /* The visited ports bitmask holds the list of ports offloading any
+ * bonding interface. Initially we mark all these ports as unvisited,
+ * then every time we visit a port in this bitmask, we know that it is
+ * the lowest numbered port, i.e. the one whose logical ID == physical
+ * port ID == LAG ID. So we mark as visited all further ports in the
+ * bitmask that are offloading the same bonding interface. This way,
+ * we set up the aggregation PGIDs only once per bonding interface.
+ */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (!ocelot_port || !ocelot_port->bond)
+ continue;
+
+ visited &= ~BIT(port);
+ }
+
+ /* Now, set PGIDs for each active LAG */
for (lag = 0; lag < ocelot->num_phys_ports; lag++) {
+ struct net_device *bond = ocelot->ports[lag]->bond;
+ int num_active_ports = 0;
unsigned long bond_mask;
- int aggr_count = 0;
u8 aggr_idx[16];
- bond_mask = ocelot->lags[lag];
- if (!bond_mask)
+ if (!bond || (visited & BIT(lag)))
continue;
+ bond_mask = ocelot_get_bond_mask(ocelot, bond, true);
+
for_each_set_bit(port, &bond_mask, ocelot->num_phys_ports) {
// Destination mask
ocelot_write_rix(ocelot, bond_mask,
ANA_PGID_PGID, port);
- aggr_idx[aggr_count] = port;
- aggr_count++;
+ aggr_idx[num_active_ports++] = port;
}
for_each_aggr_pgid(ocelot, i) {
@@ -1252,63 +1606,74 @@ static void ocelot_set_aggr_pgids(struct ocelot *ocelot)
ac = ocelot_read_rix(ocelot, ANA_PGID_PGID, i);
ac &= ~bond_mask;
- ac |= BIT(aggr_idx[i % aggr_count]);
+ /* Don't do division by zero if there was no active
+ * port. Just make all aggregation codes zero.
+ */
+ if (num_active_ports)
+ ac |= BIT(aggr_idx[i % num_active_ports]);
ocelot_write_rix(ocelot, ac, ANA_PGID_PGID, i);
}
- }
-}
-
-static void ocelot_setup_lag(struct ocelot *ocelot, int lag)
-{
- unsigned long bond_mask = ocelot->lags[lag];
- unsigned int p;
- for_each_set_bit(p, &bond_mask, ocelot->num_phys_ports) {
- u32 port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, p);
+ /* Mark all ports in the same LAG as visited to avoid applying
+ * the same config again.
+ */
+ for (port = lag; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
+ if (!ocelot_port)
+ continue;
- /* Use lag port as logical port for port i */
- ocelot_write_gix(ocelot, port_cfg |
- ANA_PORT_PORT_CFG_PORTID_VAL(lag),
- ANA_PORT_PORT_CFG, p);
+ if (ocelot_port->bond == bond)
+ visited |= BIT(port);
+ }
}
}
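Load balancing over a LAG works by spreading the aggregation PGIDs round-robin across the currently active members, which is what the aggr_idx[i % num_active_ports] expression above does; the 16-entry aggr_idx table comes straight from the code, the port set below is invented:

#include <stdio.h>

#define MODEL_NUM_AGGR	16	/* size of the aggr_idx[] table above */

int main(void)
{
	/* Active members of one bond, e.g. after a link-down on port 2. */
	int active[] = { 0, 1, 3 };
	int num_active = sizeof(active) / sizeof(active[0]);
	int i;

	for (i = 0; i < MODEL_NUM_AGGR; i++) {
		/* Each aggregation code selects exactly one egress port, so a
		 * flow hashed to code i leaves through active[i % n].
		 */
		printf("aggr code %2d -> port %d\n",
		       i, active[i % num_active]);
	}
	return 0;
}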
-int ocelot_port_lag_join(struct ocelot *ocelot, int port,
- struct net_device *bond)
+/* When offloading a bonding interface, the switch ports configured under the
+ * same bond must have the same logical port ID, equal to the physical port ID
+ * of the lowest numbered physical port in that bond. Otherwise, in standalone/
+ * bridged mode, each port has a logical port ID equal to its physical port ID.
+ */
+static void ocelot_setup_logical_port_ids(struct ocelot *ocelot)
{
- struct net_device *ndev;
- u32 bond_mask = 0;
- int lag, lp;
+ int port;
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(bond, ndev) {
- struct ocelot_port_private *priv = netdev_priv(ndev);
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct net_device *bond;
- bond_mask |= BIT(priv->chip_port);
- }
- rcu_read_unlock();
+ if (!ocelot_port)
+ continue;
- lp = __ffs(bond_mask);
+ bond = ocelot_port->bond;
+ if (bond) {
+ int lag = __ffs(ocelot_get_bond_mask(ocelot, bond,
+ false));
- /* If the new port is the lowest one, use it as the logical port from
- * now on
- */
- if (port == lp) {
- lag = port;
- ocelot->lags[port] = bond_mask;
- bond_mask &= ~BIT(port);
- if (bond_mask) {
- lp = __ffs(bond_mask);
- ocelot->lags[lp] = 0;
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_PORT_CFG_PORTID_VAL(lag),
+ ANA_PORT_PORT_CFG_PORTID_VAL_M,
+ ANA_PORT_PORT_CFG, port);
+ } else {
+ ocelot_rmw_gix(ocelot,
+ ANA_PORT_PORT_CFG_PORTID_VAL(port),
+ ANA_PORT_PORT_CFG_PORTID_VAL_M,
+ ANA_PORT_PORT_CFG, port);
}
- } else {
- lag = lp;
- ocelot->lags[lp] |= BIT(port);
}
+}
+
+int ocelot_port_lag_join(struct ocelot *ocelot, int port,
+ struct net_device *bond,
+ struct netdev_lag_upper_info *info)
+{
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return -EOPNOTSUPP;
- ocelot_setup_lag(ocelot, lag);
+ ocelot->ports[port]->bond = bond;
+
+ ocelot_setup_logical_port_ids(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot);
ocelot_set_aggr_pgids(ocelot);
return 0;
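As the comment before ocelot_setup_logical_port_ids() states, every member of a bond is given the logical port ID of the lowest-numbered member, i.e. the index of the least significant bit of the bond mask. A one-line model using __builtin_ctz() as the userspace stand-in for the kernel's __ffs():

#include <stdio.h>

int main(void)
{
	unsigned int bond = (1u << 2) | (1u << 5);	/* ports 2 and 5 bonded */

	/* __ffs() returns the bit index of the lowest set bit; both members
	 * therefore share logical port ID 2.
	 */
	printf("logical port id for both members: %d\n", __builtin_ctz(bond));
	return 0;
}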
@@ -1318,33 +1683,24 @@ EXPORT_SYMBOL(ocelot_port_lag_join);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond)
{
- u32 port_cfg;
- int i;
-
- /* Remove port from any lag */
- for (i = 0; i < ocelot->num_phys_ports; i++)
- ocelot->lags[i] &= ~BIT(port);
+ ocelot->ports[port]->bond = NULL;
- /* if it was the logical port of the lag, move the lag config to the
- * next port
- */
- if (ocelot->lags[port]) {
- int n = __ffs(ocelot->lags[port]);
+ ocelot_setup_logical_port_ids(ocelot);
+ ocelot_apply_bridge_fwd_mask(ocelot);
+ ocelot_set_aggr_pgids(ocelot);
+}
+EXPORT_SYMBOL(ocelot_port_lag_leave);
- ocelot->lags[n] = ocelot->lags[port];
- ocelot->lags[port] = 0;
+void ocelot_port_lag_change(struct ocelot *ocelot, int port, bool lag_tx_active)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- ocelot_setup_lag(ocelot, n);
- }
-
- port_cfg = ocelot_read_gix(ocelot, ANA_PORT_PORT_CFG, port);
- port_cfg &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;
- ocelot_write_gix(ocelot, port_cfg | ANA_PORT_PORT_CFG_PORTID_VAL(port),
- ANA_PORT_PORT_CFG, port);
+ ocelot_port->lag_tx_active = lag_tx_active;
+ /* Rebalance the LAGs */
ocelot_set_aggr_pgids(ocelot);
}
-EXPORT_SYMBOL(ocelot_port_lag_leave);
+EXPORT_SYMBOL(ocelot_port_lag_change);
/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
* The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
@@ -1362,9 +1718,9 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
if (port == ocelot->npi) {
maxlen += OCELOT_TAG_LEN;
- if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
maxlen += OCELOT_SHORT_PREFIX_LEN;
- else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
maxlen += OCELOT_LONG_PREFIX_LEN;
}
@@ -1379,7 +1735,7 @@ void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
pause_stop);
/* Tail dropping watermarks */
- atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) /
+ atop_tot = (ocelot->packet_buffer_size - 9 * maxlen) /
OCELOT_BUFFER_CELL_SZ;
atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ;
ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port);
@@ -1394,9 +1750,9 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
if (port == ocelot->npi) {
max_mtu -= OCELOT_TAG_LEN;
- if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_SHORT)
+ if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_SHORT)
max_mtu -= OCELOT_SHORT_PREFIX_LEN;
- else if (ocelot->inj_prefix == OCELOT_TAG_PREFIX_LONG)
+ else if (ocelot->npi_inj_prefix == OCELOT_TAG_PREFIX_LONG)
max_mtu -= OCELOT_LONG_PREFIX_LEN;
}
@@ -1404,6 +1760,86 @@ int ocelot_get_max_mtu(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_get_max_mtu);
+static void ocelot_port_set_learning(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 val = 0;
+
+ if (enabled)
+ val = ANA_PORT_PORT_CFG_LEARN_ENA;
+
+ ocelot_rmw_gix(ocelot, val, ANA_PORT_PORT_CFG_LEARN_ENA,
+ ANA_PORT_PORT_CFG, port);
+
+ ocelot_port->learn_ena = enabled;
+}
+
+static void ocelot_port_set_ucast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_UC);
+}
+
+static void ocelot_port_set_mcast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_MC);
+}
+
+static void ocelot_port_set_bcast_flood(struct ocelot *ocelot, int port,
+ bool enabled)
+{
+ u32 val = 0;
+
+ if (enabled)
+ val = BIT(port);
+
+ ocelot_rmw_rix(ocelot, val, BIT(port), ANA_PGID_PGID, PGID_BC);
+}
+
+int ocelot_port_pre_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
+ BR_BCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_port_pre_bridge_flags);
+
+void ocelot_port_bridge_flags(struct ocelot *ocelot, int port,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & BR_LEARNING)
+ ocelot_port_set_learning(ocelot, port,
+ !!(flags.val & BR_LEARNING));
+
+ if (flags.mask & BR_FLOOD)
+ ocelot_port_set_ucast_flood(ocelot, port,
+ !!(flags.val & BR_FLOOD));
+
+ if (flags.mask & BR_MCAST_FLOOD)
+ ocelot_port_set_mcast_flood(ocelot, port,
+ !!(flags.val & BR_MCAST_FLOOD));
+
+ if (flags.mask & BR_BCAST_FLOOD)
+ ocelot_port_set_bcast_flood(ocelot, port,
+ !!(flags.val & BR_BCAST_FLOOD));
+}
+EXPORT_SYMBOL(ocelot_port_bridge_flags);
+
void ocelot_init_port(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
@@ -1453,6 +1889,9 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
REW_PORT_VLAN_CFG_PORT_TPID_M,
REW_PORT_VLAN_CFG, port);
+ /* Disable source address learning for standalone mode */
+ ocelot_port_set_learning(ocelot, port, false);
+
/* Enable vcap lookups */
ocelot_vcap_enable(ocelot, port);
}
@@ -1481,9 +1920,9 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
/* CPU port Injection/Extraction configuration */
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
- ocelot->xtr_prefix);
+ OCELOT_TAG_PREFIX_NONE);
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
- ocelot->inj_prefix);
+ OCELOT_TAG_PREFIX_NONE);
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
@@ -1492,6 +1931,21 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
ANA_PORT_VLAN_CFG, cpu);
}
+static void ocelot_detect_features(struct ocelot *ocelot)
+{
+ int mmgt, eq_ctrl;
+
+ /* For Ocelot, Felix, Seville, Serval etc, SYS:MMGT:MMGT:FREECNT holds
+ * the number of 240-byte free memory words (aka 4-cell chunks) and not
+ * 192 bytes as the documentation incorrectly says.
+ */
+ mmgt = ocelot_read(ocelot, SYS_MMGT);
+ ocelot->packet_buffer_size = 240 * SYS_MMGT_FREECNT(mmgt);
+
+ eq_ctrl = ocelot_read(ocelot, QSYS_EQ_CTRL);
+ ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
+}
+
int ocelot_init(struct ocelot *ocelot)
{
char queue_name[32];
@@ -1506,11 +1960,6 @@ int ocelot_init(struct ocelot *ocelot)
}
}
- ocelot->lags = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
- sizeof(u32), GFP_KERNEL);
- if (!ocelot->lags)
- return -ENOMEM;
-
ocelot->stats = devm_kcalloc(ocelot->dev,
ocelot->num_phys_ports * ocelot->num_stats,
sizeof(u64), GFP_KERNEL);
@@ -1534,6 +1983,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
@@ -1553,7 +2003,10 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write(ocelot, ANA_AGGR_CFG_AC_SMAC_ENA |
ANA_AGGR_CFG_AC_DMAC_ENA |
ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA |
- ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA, ANA_AGGR_CFG);
+ ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA |
+ ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA |
+ ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA,
+ ANA_AGGR_CFG);
/* Set MAC age time to default value. The entry is aged after
* 2*AGE_PERIOD
@@ -1572,7 +2025,7 @@ int ocelot_init(struct ocelot *ocelot)
/* Setup flooding PGIDs */
for (i = 0; i < ocelot->num_flooding_pgids; i++)
ocelot_write_rix(ocelot, ANA_FLOODING_FLD_MULTICAST(PGID_MC) |
- ANA_FLOODING_FLD_BROADCAST(PGID_MC) |
+ ANA_FLOODING_FLD_BROADCAST(PGID_BC) |
ANA_FLOODING_FLD_UNICAST(PGID_UC),
ANA_FLOODING, i);
ocelot_write(ocelot, ANA_FLOODING_IPMC_FLD_MC6_DATA(PGID_MCIPV6) |
@@ -1593,15 +2046,18 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_SRC + port);
}
- /* Allow broadcast MAC frames. */
for_each_nonreserved_multicast_dest_pgid(ocelot, i) {
u32 val = ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports - 1, 0));
ocelot_write_rix(ocelot, val, ANA_PGID_PGID, i);
}
- ocelot_write_rix(ocelot,
- ANA_PGID_PGID_PGID(GENMASK(ocelot->num_phys_ports, 0)),
- ANA_PGID_PGID, PGID_MC);
+ /* Allow broadcast and unknown L2 multicast to the CPU. */
+ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID, PGID_MC);
+ ocelot_rmw_rix(ocelot, ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports)),
+ ANA_PGID_PGID, PGID_BC);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV4);
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, PGID_MCIPV6);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 291d39d49c4e..db6b1a4c3926 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -32,15 +32,6 @@
#define OCELOT_PTP_QUEUE_SZ 128
-struct frame_info {
- u32 len;
- u16 port;
- u16 vid;
- u8 tag_type;
- u16 rew_op;
- u32 timestamp; /* rew_val */
-};
-
struct ocelot_port_tc {
bool block_shared;
unsigned long offload_cnt;
@@ -109,10 +100,6 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
unsigned int vid, enum macaccess_entry_type type);
int ocelot_mact_forget(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN], unsigned int vid);
-int ocelot_port_lag_join(struct ocelot *ocelot, int port,
- struct net_device *bond);
-void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
- struct net_device *bond);
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port);
int ocelot_netdev_to_port(struct net_device *dev);
@@ -121,13 +108,16 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
struct phy_device *phy);
-
-void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
+void ocelot_release_port(struct ocelot_port *ocelot_port);
+int ocelot_devlink_init(struct ocelot *ocelot);
+void ocelot_devlink_teardown(struct ocelot *ocelot);
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour);
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port);
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
+extern const struct devlink_ops ocelot_devlink_ops;
#endif
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
new file mode 100644
index 000000000000..edafbd37d12c
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright 2020-2021 NXP Semiconductors
+ */
+#include <net/devlink.h>
+#include "ocelot.h"
+
+/* The queue system tracks four resource consumptions:
+ * Resource 0: Memory tracked per source port
+ * Resource 1: Frame references tracked per source port
+ * Resource 2: Memory tracked per destination port
+ * Resource 3: Frame references tracked per destination port
+ */
+#define OCELOT_RESOURCE_SZ 256
+#define OCELOT_NUM_RESOURCES 4
+
+#define BUF_xxxx_I (0 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_I (1 * OCELOT_RESOURCE_SZ)
+#define BUF_xxxx_E (2 * OCELOT_RESOURCE_SZ)
+#define REF_xxxx_E (3 * OCELOT_RESOURCE_SZ)
+
+/* For each resource type there are 4 types of watermarks:
+ * Q_RSRV: reservation per QoS class per port
+ * PRIO_SHR: sharing watermark per QoS class across all ports
+ * P_RSRV: reservation per port
+ * COL_SHR: sharing watermark per color (drop precedence) across all ports
+ */
+#define xxx_Q_RSRV_x 0
+#define xxx_PRIO_SHR_x 216
+#define xxx_P_RSRV_x 224
+#define xxx_COL_SHR_x 254
+
+/* Reservation Watermarks
+ * ----------------------
+ *
+ * For setting up the reserved areas, egress watermarks exist per port and per
+ * QoS class for both ingress and egress.
+ */
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_E
+ */
+#define BUF_Q_RSRV_E(port, prio) \
+ (BUF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_E
+ */
+#define BUF_P_RSRV_E(port) \
+ (BUF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of packet buffer
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_Q_RSRV_I
+ */
+#define BUF_Q_RSRV_I(port, prio) \
+ (BUF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of packet buffer
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * BUF_P_RSRV_I
+ */
+#define BUF_P_RSRV_I(port) \
+ (BUF_xxxx_I + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_E
+ */
+#define REF_Q_RSRV_E(port, prio) \
+ (REF_xxxx_E + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per egress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_E
+ */
+#define REF_P_RSRV_E(port) \
+ (REF_xxxx_E + xxx_P_RSRV_x + (port))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_Q_RSRV_I
+ */
+#define REF_Q_RSRV_I(port, prio) \
+ (REF_xxxx_I + xxx_Q_RSRV_x + OCELOT_NUM_TC * (port) + (prio))
+
+/* Amount of frame references
+ * | for all port's traffic classes
+ * | | reserved
+ * | | | per ingress port
+ * | | | |
+ * V V v v
+ * REF_P_RSRV_I
+ */
+#define REF_P_RSRV_I(port) \
+ (REF_xxxx_I + xxx_P_RSRV_x + (port))
+
+/* Sharing Watermarks
+ * ------------------
+ *
+ * The shared memory area is shared between all ports.
+ */
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_E
+ */
+#define BUF_PRIO_SHR_E(prio) \
+ (BUF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_E
+ */
+#define BUF_COL_SHR_E(dp) \
+ (BUF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of buffer
+ * | per QoS class
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_PRIO_SHR_I
+ */
+#define BUF_PRIO_SHR_I(prio) \
+ (BUF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of buffer
+ * | per color (drop precedence level)
+ * | | from the shared memory area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * BUF_COL_SHR_I
+ */
+#define BUF_COL_SHR_I(dp) \
+ (BUF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_E
+ */
+#define REF_PRIO_SHR_E(prio) \
+ (REF_xxxx_E + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for egress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_E
+ */
+#define REF_COL_SHR_E(dp) \
+ (REF_xxxx_E + xxx_COL_SHR_x + (1 - (dp)))
+
+/* Amount of frame references
+ * | per QoS class
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_PRIO_SHR_I
+ */
+#define REF_PRIO_SHR_I(prio) \
+ (REF_xxxx_I + xxx_PRIO_SHR_x + (prio))
+
+/* Amount of frame references
+ * | per color (drop precedence level)
+ * | | from the shared area
+ * | | | for ingress traffic
+ * | | | |
+ * V V v v
+ * REF_COL_SHR_I
+ */
+#define REF_COL_SHR_I(dp) \
+ (REF_xxxx_I + xxx_COL_SHR_x + (1 - (dp)))
+
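Every watermark therefore lives at a fixed index into the four 256-entry resource areas of QSYS_RES_CFG: the per-{port, TC} block first, then the per-TC sharing block at offset 216, the per-port block at 224 and the per-color block at 254. A quick check of a few indices, with OCELOT_NUM_TC assumed to be 8 as in the switch headers:

#include <stdio.h>

/* Reproduce the watermark index arithmetic from the macros above. */
#define MODEL_RESOURCE_SZ	256
#define MODEL_NUM_TC		8

#define MODEL_BUF_I	(0 * MODEL_RESOURCE_SZ)
#define MODEL_REF_I	(1 * MODEL_RESOURCE_SZ)
#define MODEL_BUF_E	(2 * MODEL_RESOURCE_SZ)
#define MODEL_REF_E	(3 * MODEL_RESOURCE_SZ)

#define MODEL_Q_RSRV	0
#define MODEL_PRIO_SHR	216
#define MODEL_P_RSRV	224
#define MODEL_COL_SHR	254

int main(void)
{
	int port = 3, prio = 5;

	printf("BUF_Q_RSRV_I(3, 5) = %d\n",
	       MODEL_BUF_I + MODEL_Q_RSRV + MODEL_NUM_TC * port + prio); /* 29 */
	printf("REF_P_RSRV_E(3)    = %d\n",
	       MODEL_REF_E + MODEL_P_RSRV + port);                       /* 995 */
	printf("BUF_COL_SHR_E(0)   = %d\n",
	       MODEL_BUF_E + MODEL_COL_SHR + (1 - 0));                   /* 767 */
	return 0;
}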
+static u32 ocelot_wm_read(struct ocelot *ocelot, int index)
+{
+ int wm = ocelot_read_gix(ocelot, QSYS_RES_CFG, index);
+
+ return ocelot->ops->wm_dec(wm);
+}
+
+static void ocelot_wm_write(struct ocelot *ocelot, int index, u32 val)
+{
+ u32 wm = ocelot->ops->wm_enc(val);
+
+ ocelot_write_gix(ocelot, wm, QSYS_RES_CFG, index);
+}
+
+static void ocelot_wm_status(struct ocelot *ocelot, int index, u32 *inuse,
+ u32 *maxuse)
+{
+ int res_stat = ocelot_read_gix(ocelot, QSYS_RES_STAT, index);
+
+ return ocelot->ops->wm_stat(res_stat, inuse, maxuse);
+}
+
+/* The hardware comes out of reset with strange defaults: the sum of all
+ * reservations for frame memory is larger than the total buffer size.
+ * One has to wonder how the reservation watermarks can still guarantee
+ * anything under congestion.
+ * Bring some sense into the hardware by changing the defaults to disable all
+ * reservations and rely only on the sharing watermark for frames with drop
+ * precedence 0. The user can still explicitly request reservations per port
+ * and per port-tc through devlink-sb.
+ */
+static void ocelot_disable_reservation_watermarks(struct ocelot *ocelot,
+ int port)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, BUF_Q_RSRV_E(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_I(port, prio), 0);
+ ocelot_wm_write(ocelot, REF_Q_RSRV_E(port, prio), 0);
+ }
+
+ ocelot_wm_write(ocelot, BUF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, BUF_P_RSRV_E(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_I(port), 0);
+ ocelot_wm_write(ocelot, REF_P_RSRV_E(port), 0);
+}
+
+/* We want the sharing watermarks to consume all nonreserved resources, for
+ * efficient resource utilization (a single traffic flow should be able to use
+ * up the entire buffer space and frame resources as long as there's no
+ * interference).
+ * The switch has 10 sharing watermarks per lookup: 8 per traffic class and 2
+ * per color (drop precedence).
+ * The trouble with configuring these sharing watermarks is that:
+ * (1) There's a risk that we overcommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to the max
+ * (b) all 2 per-color sharing watermarks to the max
+ * (2) There's a risk that we undercommit the resources if we configure
+ * (a) all 8 per-TC sharing watermarks to "max / 8"
+ * (b) all 2 per-color sharing watermarks to "max / 2"
+ * So for Linux, let's just disable the sharing watermarks per traffic class
+ * (setting them to 0 will make them always exceeded), and rely only on the
+ * sharing watermark for drop priority 0. So frames with drop priority set to 1
+ * by QoS classification or policing will still be allowed, but only as long as
+ * the port and port-TC reservations are not exceeded.
+ */
+static void ocelot_disable_tc_sharing_watermarks(struct ocelot *ocelot)
+{
+ int prio;
+
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, BUF_PRIO_SHR_E(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_I(prio), 0);
+ ocelot_wm_write(ocelot, REF_PRIO_SHR_E(prio), 0);
+ }
+}
+
+static void ocelot_get_buf_rsrv(struct ocelot *ocelot, u32 *buf_rsrv_i,
+ u32 *buf_rsrv_e)
+{
+ int port, prio;
+
+ *buf_rsrv_i = 0;
+ *buf_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *buf_rsrv_i += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_I(port, prio));
+ *buf_rsrv_e += ocelot_wm_read(ocelot,
+ BUF_Q_RSRV_E(port, prio));
+ }
+
+ *buf_rsrv_i += ocelot_wm_read(ocelot, BUF_P_RSRV_I(port));
+ *buf_rsrv_e += ocelot_wm_read(ocelot, BUF_P_RSRV_E(port));
+ }
+
+ *buf_rsrv_i *= OCELOT_BUFFER_CELL_SZ;
+ *buf_rsrv_e *= OCELOT_BUFFER_CELL_SZ;
+}
+
+static void ocelot_get_ref_rsrv(struct ocelot *ocelot, u32 *ref_rsrv_i,
+ u32 *ref_rsrv_e)
+{
+ int port, prio;
+
+ *ref_rsrv_i = 0;
+ *ref_rsrv_e = 0;
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ *ref_rsrv_i += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_I(port, prio));
+ *ref_rsrv_e += ocelot_wm_read(ocelot,
+ REF_Q_RSRV_E(port, prio));
+ }
+
+ *ref_rsrv_i += ocelot_wm_read(ocelot, REF_P_RSRV_I(port));
+ *ref_rsrv_e += ocelot_wm_read(ocelot, REF_P_RSRV_E(port));
+ }
+}
+
+/* Calculate all reservations, then set up the sharing watermark for DP=0 to
+ * consume the remaining resources up to the pool's configured size.
+ */
+static void ocelot_setup_sharing_watermarks(struct ocelot *ocelot)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+ u32 buf_shr_i, buf_shr_e;
+ u32 ref_shr_i, ref_shr_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ buf_shr_i = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] -
+ buf_rsrv_i;
+ buf_shr_e = ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] -
+ buf_rsrv_e;
+ ref_shr_i = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] -
+ ref_rsrv_i;
+ ref_shr_e = ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] -
+ ref_rsrv_e;
+
+ buf_shr_i /= OCELOT_BUFFER_CELL_SZ;
+ buf_shr_e /= OCELOT_BUFFER_CELL_SZ;
+
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(0), buf_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(0), buf_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(0), ref_shr_e);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(0), ref_shr_i);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_I(1), 0);
+ ocelot_wm_write(ocelot, BUF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_E(1), 0);
+ ocelot_wm_write(ocelot, REF_COL_SHR_I(1), 0);
+}
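The DP=0 sharing watermark computed above is simply "pool size minus the sum of all reservations", with buffer values converted from bytes to cells before being written. A worked example with made-up figures (cell size assumed to be the 60-byte OCELOT_BUFFER_CELL_SZ):

#include <stdio.h>

#define MODEL_CELL_SZ	60	/* assumed OCELOT_BUFFER_CELL_SZ */

int main(void)
{
	/* Made-up figures: a 220 KB ingress buffer pool with 16 KB worth of
	 * per-port and per-port-TC reservations configured via devlink-sb.
	 */
	unsigned int pool_size = 220 * 1024;
	unsigned int buf_rsrv_i = 16 * 1024;
	unsigned int buf_shr_i = (pool_size - buf_rsrv_i) / MODEL_CELL_SZ;

	printf("BUF_COL_SHR_I(0) watermark: %u cells (%u bytes)\n",
	       buf_shr_i, buf_shr_i * MODEL_CELL_SZ);
	return 0;
}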
+
+/* Ensure that all reservations can be enforced */
+static int ocelot_watermark_validate(struct ocelot *ocelot,
+ struct netlink_ext_ack *extack)
+{
+ u32 buf_rsrv_i, buf_rsrv_e;
+ u32 ref_rsrv_i, ref_rsrv_e;
+
+ ocelot_get_buf_rsrv(ocelot, &buf_rsrv_i, &buf_rsrv_e);
+ ocelot_get_ref_rsrv(ocelot, &ref_rsrv_i, &ref_rsrv_e);
+
+ if (buf_rsrv_i > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (buf_rsrv_e > ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress frame reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_i > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+ if (ref_rsrv_e > ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR]) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Egress reference reservations exceed pool size");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/* The hardware works like this:
+ *
+ * Frame forwarding decision taken
+ * |
+ * v
+ * +--------------------+--------------------+--------------------+
+ * | | | |
+ * v v v v
+ * Ingress memory Egress memory Ingress frame Egress frame
+ * check check reference check reference check
+ * | | | |
+ * v v v v
+ * BUF_Q_RSRV_I ok BUF_Q_RSRV_E ok REF_Q_RSRV_I ok REF_Q_RSRV_E ok
+ *(src port, prio) -+ (dst port, prio) -+ (src port, prio) -+ (dst port, prio) -+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_P_RSRV_I ok| BUF_P_RSRV_E ok| REF_P_RSRV_I ok| REF_P_RSRV_E ok|
+ * (src port) ----+ (dst port) ----+ (src port) ----+ (dst port) -----+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_PRIO_SHR_I ok| BUF_PRIO_SHR_E ok| REF_PRIO_SHR_I ok| REF_PRIO_SHR_E ok|
+ * (prio) ------+ (prio) ------+ (prio) ------+ (prio) -------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v | v | v | v |
+ * BUF_COL_SHR_I ok| BUF_COL_SHR_E ok| REF_COL_SHR_I ok| REF_COL_SHR_E ok|
+ * (dp) -------+ (dp) -------+ (dp) -------+ (dp) --------+
+ * | | | | | | | |
+ * |exceeded | |exceeded | |exceeded | |exceeded |
+ * v v v v v v v v
+ * fail success fail success fail success fail success
+ * | | | | | | | |
+ * v v v v v v v v
+ * +-----+----+ +-----+----+ +-----+----+ +-----+-----+
+ * | | | |
+ * +-------> OR <-------+ +-------> OR <-------+
+ * | |
+ * v v
+ * +----------------> AND <-----------------+
+ * |
+ * v
+ * FIFO drop / accept
+ *
+ * We are modeling each of the 4 parallel lookups as a devlink-sb pool.
+ * At least one (ingress or egress) memory pool and one (ingress or egress)
+ * frame reference pool need to have resources for frame acceptance to succeed.
+ *
+ * The following watermarks are controlled explicitly through devlink-sb:
+ * BUF_Q_RSRV_I, BUF_Q_RSRV_E, REF_Q_RSRV_I, REF_Q_RSRV_E
+ * BUF_P_RSRV_I, BUF_P_RSRV_E, REF_P_RSRV_I, REF_P_RSRV_E
+ * The following watermarks are controlled implicitly through devlink-sb:
+ * BUF_COL_SHR_I, BUF_COL_SHR_E, REF_COL_SHR_I, REF_COL_SHR_E
+ * The following watermarks are unused and disabled:
+ * BUF_PRIO_SHR_I, BUF_PRIO_SHR_E, REF_PRIO_SHR_I, REF_PRIO_SHR_E
+ *
+ * This function overrides the hardware defaults with more sane ones (no
+ * reservations by default, let sharing use all resources) and disables the
+ * unused watermarks.
+ */
+static void ocelot_watermark_init(struct ocelot *ocelot)
+{
+ int all_tcs = GENMASK(OCELOT_NUM_TC - 1, 0);
+ int port;
+
+ ocelot_write(ocelot, all_tcs, QSYS_RES_QOS_MODE);
+
+ for (port = 0; port <= ocelot->num_phys_ports; port++)
+ ocelot_disable_reservation_watermarks(ocelot, port);
+
+ ocelot_disable_tc_sharing_watermarks(ocelot);
+ ocelot_setup_sharing_watermarks(ocelot);
+}
+
+/* Pool size and type are fixed up at runtime. Keeping this structure to
+ * look up the cell size multipliers.
+ */
+static const struct devlink_sb_pool_info ocelot_sb_pool[] = {
+ [OCELOT_SB_BUF] = {
+ .cell_size = OCELOT_BUFFER_CELL_SZ,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+ [OCELOT_SB_REF] = {
+ .cell_size = 1,
+ .threshold_type = DEVLINK_SB_THRESHOLD_TYPE_STATIC,
+ },
+};
+
+/* Returns the pool size configured through ocelot_sb_pool_set */
+int ocelot_sb_pool_get(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ if (sb_index >= OCELOT_SB_NUM)
+ return -ENODEV;
+ if (pool_index >= OCELOT_SB_POOL_NUM)
+ return -ENODEV;
+
+ *pool_info = ocelot_sb_pool[sb_index];
+ pool_info->size = ocelot->pool_size[sb_index][pool_index];
+ if (pool_index)
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_INGRESS;
+ else
+ pool_info->pool_type = DEVLINK_SB_POOL_TYPE_EGRESS;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_get);
+
+/* The pool size received here configures the total amount of resources used on
+ * ingress (or on egress, depending upon the pool index). The pool size, minus
+ * the values for the port and port-tc reservations, is written into the
+ * COL_SHR(dp=0) sharing watermark.
+ */
+int ocelot_sb_pool_set(struct ocelot *ocelot, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ u32 old_pool_size;
+ int err;
+
+ if (sb_index >= OCELOT_SB_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid sb, use 0 for buffers and 1 for frame references");
+ return -ENODEV;
+ }
+ if (pool_index >= OCELOT_SB_POOL_NUM) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Invalid pool, use 0 for ingress and 1 for egress");
+ return -ENODEV;
+ }
+ if (threshold_type != DEVLINK_SB_THRESHOLD_TYPE_STATIC) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only static threshold supported");
+ return -EOPNOTSUPP;
+ }
+
+ old_pool_size = ocelot->pool_size[sb_index][pool_index];
+ ocelot->pool_size[sb_index][pool_index] = size;
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot->pool_size[sb_index][pool_index] = old_pool_size;
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_pool_set);
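ocelot_sb_pool_set() and the per-port/per-TC setters below all follow the same save / apply / validate / roll-back sequence, so a rejected configuration never leaves the watermarks overcommitted. A generic, driver-independent sketch of that pattern (every name here is hypothetical, nothing is driver API):

#include <stdio.h>

/* "Apply tentatively, validate, roll back on failure", shaped like
 * ocelot_sb_pool_set().
 */
static int model_budget = 100;
static int model_used;

static int model_validate(void)
{
	return model_used <= model_budget ? 0 : -1;
}

static int model_set(int *slot, int new_value)
{
	int old = *slot;

	model_used += new_value - old;
	*slot = new_value;

	if (model_validate()) {
		/* Validation failed: restore the previous value. */
		model_used -= new_value - old;
		*slot = old;
		return -1;
	}
	return 0;
}

int main(void)
{
	int reservation = 0;

	printf("set 40:  %s\n", model_set(&reservation, 40) ? "rejected" : "ok");
	printf("set 160: %s\n", model_set(&reservation, 160) ? "rejected" : "ok");
	printf("final reservation: %d (used %d of %d)\n",
	       reservation, model_used, model_budget);
	return 0;
}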
+
+/* This retrieves the configuration made with ocelot_sb_port_pool_set */
+int ocelot_sb_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_get);
+
+/* This configures the P_RSRV per-port reserved resource watermark */
+int ocelot_sb_port_pool_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold, struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_port_pool_set);
+
+/* This retrieves the configuration done by ocelot_sb_tc_pool_bind_set */
+int ocelot_sb_tc_pool_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ *p_threshold = ocelot_wm_read(ocelot, wm_index);
+ *p_threshold *= ocelot_sb_pool[sb_index].cell_size;
+
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ *p_pool_index = 0;
+ else
+ *p_pool_index = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_get);
+
+/* This configures the Q_RSRV per-port-tc reserved resource watermark */
+int ocelot_sb_tc_pool_bind_set(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ int wm_index, err;
+ u32 old_thr;
+
+ /* Sanity check: the pool index must be consistent with the pool type */
+ if (pool_index == OCELOT_SB_POOL_ING &&
+ pool_type != DEVLINK_SB_POOL_TYPE_INGRESS)
+ return -EINVAL;
+ if (pool_index == OCELOT_SB_POOL_EGR &&
+ pool_type != DEVLINK_SB_POOL_TYPE_EGRESS)
+ return -EINVAL;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid shared buffer");
+ return -ENODEV;
+ }
+
+ threshold /= ocelot_sb_pool[sb_index].cell_size;
+
+ old_thr = ocelot_wm_read(ocelot, wm_index);
+ ocelot_wm_write(ocelot, wm_index, threshold);
+ err = ocelot_watermark_validate(ocelot, extack);
+ if (err) {
+ ocelot_wm_write(ocelot, wm_index, old_thr);
+ return err;
+ }
+
+ ocelot_setup_sharing_watermarks(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_tc_pool_bind_set);
+
+/* The hardware does not support atomic snapshots, so we read out the
+ * occupancy registers individually and keep this as just a stub.
+ */
+int ocelot_sb_occ_snapshot(struct ocelot *ocelot, unsigned int sb_index)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_snapshot);
+
+/* The watermark occupancy registers are cleared upon read, so reading them
+ * out is all that is needed to clear the maximum occupancy.
+ */
+int ocelot_sb_occ_max_clear(struct ocelot *ocelot, unsigned int sb_index)
+{
+ u32 inuse, maxuse;
+ int port, prio;
+
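+ /* Iterating up to and including ocelot->num_phys_ports is intentional:
+ * the last index covers the reservations of the CPU port module.
+ */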
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, BUF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, BUF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ case OCELOT_SB_REF:
+ for (port = 0; port <= ocelot->num_phys_ports; port++) {
+ for (prio = 0; prio < OCELOT_NUM_TC; prio++) {
+ ocelot_wm_status(ocelot, REF_Q_RSRV_I(port, prio),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_Q_RSRV_E(port, prio),
+ &inuse, &maxuse);
+ }
+ ocelot_wm_status(ocelot, REF_P_RSRV_I(port),
+ &inuse, &maxuse);
+ ocelot_wm_status(ocelot, REF_P_RSRV_E(port),
+ &inuse, &maxuse);
+ }
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_max_clear);
+
+/* This retrieves the watermark occupancy for per-port P_RSRV watermarks */
+int ocelot_sb_occ_port_pool_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = BUF_P_RSRV_I(port);
+ else
+ wm_index = BUF_P_RSRV_E(port);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_index == OCELOT_SB_POOL_ING)
+ wm_index = REF_P_RSRV_I(port);
+ else
+ wm_index = REF_P_RSRV_E(port);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_port_pool_get);
+
+/* This retrieves the watermark occupancy for per-port-tc Q_RSRV watermarks */
+int ocelot_sb_occ_tc_port_bind_get(struct ocelot *ocelot, int port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ int wm_index;
+
+ switch (sb_index) {
+ case OCELOT_SB_BUF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = BUF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = BUF_Q_RSRV_E(port, tc_index);
+ break;
+ case OCELOT_SB_REF:
+ if (pool_type == DEVLINK_SB_POOL_TYPE_INGRESS)
+ wm_index = REF_Q_RSRV_I(port, tc_index);
+ else
+ wm_index = REF_Q_RSRV_E(port, tc_index);
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ocelot_wm_status(ocelot, wm_index, p_cur, p_max);
+ *p_cur *= ocelot_sb_pool[sb_index].cell_size;
+ *p_max *= ocelot_sb_pool[sb_index].cell_size;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_sb_occ_tc_port_bind_get);
+
+int ocelot_devlink_sb_register(struct ocelot *ocelot)
+{
+ int err;
+
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_BUF,
+ ocelot->packet_buffer_size, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err)
+ return err;
+
+ err = devlink_sb_register(ocelot->devlink, OCELOT_SB_REF,
+ ocelot->num_frame_refs, 1, 1,
+ OCELOT_NUM_TC, OCELOT_NUM_TC);
+ if (err) {
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ return err;
+ }
+
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_ING] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_BUF][OCELOT_SB_POOL_EGR] = ocelot->packet_buffer_size;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_ING] = ocelot->num_frame_refs;
+ ocelot->pool_size[OCELOT_SB_REF][OCELOT_SB_POOL_EGR] = ocelot->num_frame_refs;
+
+ ocelot_watermark_init(ocelot);
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_register);
+
+void ocelot_devlink_sb_unregister(struct ocelot *ocelot)
+{
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_BUF);
+ devlink_sb_unregister(ocelot->devlink, OCELOT_SB_REF);
+}
+EXPORT_SYMBOL(ocelot_devlink_sb_unregister);
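A short usage sketch for the shared buffers registered above (sb 0 counts packet buffer bytes, sb 1 counts frame references), using the standard devlink CLI. The devlink handle below is a placeholder and the size is purely illustrative; the actual handle depends on the bus and platform:

  devlink sb pool show pci/0000:00:00.0
  devlink sb pool set pci/0000:00:00.0 sb 0 pool 0 size 200000 thtype static
  devlink sb occupancy clearmax pci/0000:00:00.0 sb 0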
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 729495a1a77e..c3ac026f6aea 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -622,7 +622,8 @@ static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
int ret;
filter->prio = f->common.prio;
- filter->id = f->cookie;
+ filter->id.cookie = f->cookie;
+ filter->id.tc_offload = true;
ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter);
if (ret)
@@ -717,7 +718,7 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
block = &ocelot->block[block_id];
- filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
if (!filter)
return 0;
@@ -741,7 +742,7 @@ int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
block = &ocelot->block[block_id];
- filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie, true);
if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY)
return 0;
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index 0acb45948418..ea4e83410fe4 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);
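+/* Read-modify-write helper for per-port registers: clears the bits in @mask
+ * and ORs in @val.
+ */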
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+{
+ u32 cur = ocelot_port_readl(port, reg);
+
+ ocelot_port_writel(port, (cur & (~mask)) | val, reg);
+}
+EXPORT_SYMBOL(ocelot_port_rmwl);
+
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
new file mode 100644
index 000000000000..683da320bfd8
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Microsemi Ocelot Switch driver
+ *
+ * This contains glue logic between the switchdev driver operations and the
+ * mscc_ocelot_switch_lib.
+ *
+ * Copyright (c) 2017, 2019 Microsemi Corporation
+ * Copyright 2020-2021 NXP Semiconductors
+ */
+
+#include <linux/if_bridge.h>
+#include <linux/mrp_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <uapi/linux/mrp_bridge.h>
+#include "ocelot.h"
+#include "ocelot_vcap.h"
+
+static int ocelot_mrp_del_vcap(struct ocelot *ocelot, int port)
+{
+ struct ocelot_vcap_block *block_vcap_is2;
+ struct ocelot_vcap_filter *filter;
+
+ block_vcap_is2 = &ocelot->block[VCAP_IS2];
+ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, port,
+ false);
+ if (!filter)
+ return 0;
+
+ return ocelot_vcap_filter_del(ocelot, filter);
+}
+
+int ocelot_mrp_add(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (mrp->p_port != dev && mrp->s_port != dev)
+ return 0;
+
+ if (ocelot->mrp_ring_id != 0 &&
+ ocelot->mrp_s_port &&
+ ocelot->mrp_p_port)
+ return -EINVAL;
+
+ if (mrp->p_port == dev)
+ ocelot->mrp_p_port = dev;
+
+ if (mrp->s_port == dev)
+ ocelot->mrp_s_port = dev;
+
+ ocelot->mrp_ring_id = mrp->ring_id;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_mrp_add);
+
+int ocelot_mrp_del(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ return 0;
+
+ if (ocelot->mrp_ring_id == 0 &&
+ !ocelot->mrp_s_port &&
+ !ocelot->mrp_p_port)
+ return -EINVAL;
+
+ if (ocelot_mrp_del_vcap(ocelot, priv->chip_port))
+ return -EINVAL;
+
+ if (ocelot->mrp_p_port == dev)
+ ocelot->mrp_p_port = NULL;
+
+ if (ocelot->mrp_s_port == dev)
+ ocelot->mrp_s_port = NULL;
+
+ ocelot->mrp_ring_id = 0;
+
+ return 0;
+}
+EXPORT_SYMBOL(ocelot_mrp_del);
+
+int ocelot_mrp_add_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+ int err;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (ocelot->mrp_ring_id != mrp->ring_id)
+ return -EINVAL;
+
+ if (!mrp->sw_backup)
+ return -EOPNOTSUPP;
+
+ if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ return 0;
+
+ filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+ if (!filter)
+ return -ENOMEM;
+
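+ /* Trap MRP PDUs (EtherType ETH_P_MRP) received on this port to the CPU
+ * queue reserved for MRP, and use permit/deny mask mode with an empty
+ * port mask so they are not forwarded in hardware.
+ */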
+ filter->key_type = OCELOT_VCAP_KEY_ETYPE;
+ filter->prio = 1;
+ filter->id.cookie = priv->chip_port;
+ filter->id.tc_offload = false;
+ filter->block_id = VCAP_IS2;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->ingress_port_mask = BIT(priv->chip_port);
+ *(__be16 *)filter->key.etype.etype.value = htons(ETH_P_MRP);
+ *(__be16 *)filter->key.etype.etype.mask = htons(0xffff);
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0x0;
+ filter->action.cpu_copy_ena = true;
+ filter->action.cpu_qu_num = OCELOT_MRP_CPUQ;
+
+ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ if (err)
+ kfree(filter);
+
+ return err;
+}
+EXPORT_SYMBOL(ocelot_mrp_add_ring_role);
+
+int ocelot_mrp_del_ring_role(struct ocelot *ocelot, int port,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+ struct net_device *dev;
+
+ if (!ocelot_port)
+ return -EOPNOTSUPP;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+ dev = priv->dev;
+
+ if (ocelot->mrp_ring_id != mrp->ring_id)
+ return -EINVAL;
+
+ if (!mrp->sw_backup)
+ return -EOPNOTSUPP;
+
+ if (ocelot->mrp_p_port != dev && ocelot->mrp_s_port != dev)
+ return 0;
+
+ return ocelot_mrp_del_vcap(ocelot, priv->chip_port);
+}
+EXPORT_SYMBOL(ocelot_mrp_del_ring_role);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 42230f92ca9c..12cb6867a2d0 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1,13 +1,191 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Microsemi Ocelot Switch driver
*
+ * This contains glue logic between the switchdev driver operations and the
+ * mscc_ocelot_switch_lib.
+ *
* Copyright (c) 2017, 2019 Microsemi Corporation
+ * Copyright 2020-2021 NXP Semiconductors
*/
#include <linux/if_bridge.h>
+#include <net/pkt_cls.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
+static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp)
+{
+ return devlink_priv(dlp->devlink);
+}
+
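+/* The port index is recovered from the position of the devlink_port within
+ * the ocelot->devlink_ports array.
+ */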
+static int devlink_port_to_port(struct devlink_port *dlp)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+
+ return dlp - ocelot->devlink_ports;
+}
+
+static int ocelot_devlink_sb_pool_get(struct devlink *dl,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info);
+}
+
+static int ocelot_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
+ u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size,
+ threshold_type, extack);
+}
+
+static int ocelot_devlink_sb_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_threshold);
+}
+
+static int ocelot_devlink_sb_port_pool_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
+ threshold, extack);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
+ pool_type, p_pool_index,
+ p_threshold);
+}
+
+static int
+ocelot_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
+ pool_type, pool_index, threshold,
+ extack);
+}
+
+static int ocelot_devlink_sb_occ_snapshot(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_snapshot(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_max_clear(struct devlink *dl,
+ unsigned int sb_index)
+{
+ struct ocelot *ocelot = devlink_priv(dl);
+
+ return ocelot_sb_occ_max_clear(ocelot, sb_index);
+}
+
+static int ocelot_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
+ unsigned int sb_index,
+ u16 pool_index, u32 *p_cur,
+ u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
+ p_cur, p_max);
+}
+
+static int
+ocelot_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct ocelot *ocelot = devlink_port_to_ocelot(dlp);
+ int port = devlink_port_to_port(dlp);
+
+ return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index,
+ tc_index, pool_type,
+ p_cur, p_max);
+}
+
+const struct devlink_ops ocelot_devlink_ops = {
+ .sb_pool_get = ocelot_devlink_sb_pool_get,
+ .sb_pool_set = ocelot_devlink_sb_pool_set,
+ .sb_port_pool_get = ocelot_devlink_sb_port_pool_get,
+ .sb_port_pool_set = ocelot_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = ocelot_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = ocelot_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = ocelot_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = ocelot_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = ocelot_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = ocelot_devlink_sb_occ_tc_port_bind_get,
+};
+
+int ocelot_port_devlink_init(struct ocelot *ocelot, int port,
+ enum devlink_port_flavour flavour)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+ int id_len = sizeof(ocelot->base_mac);
+ struct devlink *dl = ocelot->devlink;
+ struct devlink_port_attrs attrs = {};
+
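+ /* Expose the switch's base MAC address as the physical switch ID, so
+ * that all ports report the same parent switch to user space.
+ */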
+ memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len);
+ attrs.switch_id.id_len = id_len;
+ attrs.phys.port_number = port;
+ attrs.flavour = flavour;
+
+ devlink_port_attrs_set(dlp, &attrs);
+
+ return devlink_port_register(dl, dlp, port);
+}
+
+void ocelot_port_devlink_teardown(struct ocelot *ocelot, int port)
+{
+ struct devlink_port *dlp = &ocelot->devlink_ports[port];
+
+ devlink_port_unregister(dlp);
+}
+
+static struct devlink_port *ocelot_get_devlink_port(struct net_device *dev)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot *ocelot = priv->port.ocelot;
+ int port = priv->chip_port;
+
+ return &ocelot->devlink_ports[port];
+}
+
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
struct flow_cls_offload *f,
bool ingress)
@@ -310,53 +488,20 @@ static int ocelot_port_stop(struct net_device *dev)
return 0;
}
-/* Generate the IFH for frame injection
- *
- * The IFH is a 128bit-value
- * bit 127: bypass the analyzer processing
- * bit 56-67: destination mask
- * bit 28-29: pop_cnt: 3 disables all rewriting of the frame
- * bit 20-27: cpu extraction queue mask
- * bit 16: tag type 0: C-tag, 1: S-tag
- * bit 0-11: VID
- */
-static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
-{
- ifh[0] = IFH_INJ_BYPASS | ((0x1ff & info->rew_op) << 21);
- ifh[1] = (0xf00 & info->port) >> 8;
- ifh[2] = (0xff & info->port) << 24;
- ifh[3] = (info->tag_type << 16) | info->vid;
-
- return 0;
-}
-
-static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ocelot_port_private *priv = netdev_priv(dev);
- struct skb_shared_info *shinfo = skb_shinfo(skb);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
- u32 val, ifh[OCELOT_TAG_LEN / 4];
- struct frame_info info = {};
- u8 grp = 0; /* Send everything on CPU group 0 */
- unsigned int i, count, last;
int port = priv->chip_port;
+ u32 rew_op = 0;
- val = ocelot_read(ocelot, QS_INJ_STATUS);
- if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
- (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp))))
+ if (!ocelot_can_inject(ocelot, 0))
return NETDEV_TX_BUSY;
- ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
- QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
-
- info.port = BIT(port);
- info.tag_type = IFH_TAG_TYPE_C;
- info.vid = skb_vlan_tag_get(skb);
-
/* Check if timestamping is needed */
- if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
- info.rew_op = ocelot_port->ptp_cmd;
+ if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ rew_op = ocelot_port->ptp_cmd;
if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
struct sk_buff *clone;
@@ -369,45 +514,11 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
ocelot_port_add_txtstamp_skb(ocelot, port, clone);
- info.rew_op |= clone->cb[0] << 3;
+ rew_op |= clone->cb[0] << 3;
}
}
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
- info.rew_op = ocelot_port->ptp_cmd;
- if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
- info.rew_op |= skb->cb[0] << 3;
- }
-
- ocelot_gen_ifh(ifh, &info);
-
- for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
- ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
- QS_INJ_WR, grp);
-
- count = (skb->len + 3) / 4;
- last = skb->len % 4;
- for (i = 0; i < count; i++)
- ocelot_write_rix(ocelot, ((u32 *)skb->data)[i], QS_INJ_WR, grp);
-
- /* Add padding */
- while (i < (OCELOT_BUFFER_CELL_SZ / 4)) {
- ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
- i++;
- }
-
- /* Indicate EOF and valid bytes in last word */
- ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
- QS_INJ_CTRL_VLD_BYTES(skb->len < OCELOT_BUFFER_CELL_SZ ? 0 : last) |
- QS_INJ_CTRL_EOF,
- QS_INJ_CTRL, grp);
-
- /* Add dummy CRC */
- ocelot_write_rix(ocelot, 0, QS_INJ_WR, grp);
- skb_tx_timestamp(skb);
-
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
+ ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
kfree_skb(skb);
@@ -457,7 +568,7 @@ static void ocelot_mact_work(struct work_struct *work)
break;
default:
break;
- };
+ }
kfree(w);
}
@@ -525,20 +636,6 @@ static void ocelot_set_rx_mode(struct net_device *dev)
__dev_mc_sync(dev, ocelot_mc_sync, ocelot_mc_unsync);
}
-static int ocelot_port_get_phys_port_name(struct net_device *dev,
- char *buf, size_t len)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- int port = priv->chip_port;
- int ret;
-
- ret = snprintf(buf, len, "p%d", port);
- if (ret >= len)
- return -EINVAL;
-
- return 0;
-}
-
static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -689,18 +786,6 @@ static int ocelot_set_features(struct net_device *dev,
return 0;
}
-static int ocelot_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct ocelot_port_private *priv = netdev_priv(dev);
- struct ocelot *ocelot = priv->port.ocelot;
-
- ppid->id_len = sizeof(ocelot->base_mac);
- memcpy(&ppid->id, &ocelot->base_mac, ppid->id_len);
-
- return 0;
-}
-
static int ocelot_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct ocelot_port_private *priv = netdev_priv(dev);
@@ -727,7 +812,6 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_stop = ocelot_port_stop,
.ndo_start_xmit = ocelot_port_xmit,
.ndo_set_rx_mode = ocelot_set_rx_mode,
- .ndo_get_phys_port_name = ocelot_port_get_phys_port_name,
.ndo_set_mac_address = ocelot_port_set_mac_address,
.ndo_get_stats64 = ocelot_get_stats64,
.ndo_fdb_add = ocelot_port_fdb_add,
@@ -736,9 +820,9 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_vlan_rx_add_vid = ocelot_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid,
.ndo_set_features = ocelot_set_features,
- .ndo_get_port_parent_id = ocelot_get_port_parent_id,
.ndo_setup_tc = ocelot_setup_tc,
.ndo_do_ioctl = ocelot_ioctl,
+ .ndo_get_devlink_port = ocelot_get_devlink_port,
};
struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
@@ -825,12 +909,8 @@ static const struct ethtool_ops ocelot_ethtool_ops = {
};
static void ocelot_port_attr_stp_state_set(struct ocelot *ocelot, int port,
- struct switchdev_trans *trans,
u8 state)
{
- if (switchdev_trans_ph_prepare(trans))
- return;
-
ocelot_bridge_stp_state_set(ocelot, port, state);
}
@@ -859,7 +939,7 @@ static void ocelot_port_attr_mc_set(struct ocelot *ocelot, int port, bool mc)
static int ocelot_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
@@ -868,19 +948,24 @@ static int ocelot_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ocelot_port_attr_stp_state_set(ocelot, port, trans,
- attr->u.stp_state);
+ ocelot_port_attr_stp_state_set(ocelot, port, attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
- ocelot_port_vlan_filtering(ocelot, port,
- attr->u.vlan_filtering, trans);
+ ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ err = ocelot_port_pre_bridge_flags(ocelot, port,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ocelot_port_bridge_flags(ocelot, port, attr->u.brport_flags);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -890,73 +975,89 @@ static int ocelot_port_attr_set(struct net_device *dev,
}
static int ocelot_port_obj_add_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
int ret;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
-
- if (switchdev_trans_ph_prepare(trans))
- ret = ocelot_vlan_vid_prepare(dev, vid, pvid,
- untagged);
- else
- ret = ocelot_vlan_vid_add(dev, vid, pvid, untagged);
- if (ret)
- return ret;
- }
- return 0;
+ ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged);
+ if (ret)
+ return ret;
+
+ return ocelot_vlan_vid_add(dev, vlan->vid, pvid, untagged);
}
-static int ocelot_port_vlan_del_vlan(struct net_device *dev,
- const struct switchdev_obj_port_vlan *vlan)
+static int ocelot_port_obj_add_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
{
- int ret;
- u16 vid;
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- ret = ocelot_vlan_vid_del(dev, vid);
+ return ocelot_port_mdb_add(ocelot, port, mdb);
+}
- if (ret)
- return ret;
- }
+static int ocelot_port_obj_del_mdb(struct net_device *dev,
+ const struct switchdev_obj_port_mdb *mdb)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
- return 0;
+ return ocelot_port_mdb_del(ocelot, port, mdb);
}
-static int ocelot_port_obj_add_mdb(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+static int ocelot_port_obj_mrp_add(struct net_device *dev,
+ const struct switchdev_obj_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ return ocelot_mrp_add(ocelot, port, mrp);
+}
- return ocelot_port_mdb_add(ocelot, port, mdb);
+static int ocelot_port_obj_mrp_del(struct net_device *dev,
+ const struct switchdev_obj_mrp *mrp)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_mrp_del(ocelot, port, mrp);
}
-static int ocelot_port_obj_del_mdb(struct net_device *dev,
- const struct switchdev_obj_port_mdb *mdb)
+static int
+ocelot_port_obj_mrp_add_ring_role(struct net_device *dev,
+ const struct switchdev_obj_ring_role_mrp *mrp)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
struct ocelot *ocelot = ocelot_port->ocelot;
int port = priv->chip_port;
- return ocelot_port_mdb_del(ocelot, port, mdb);
+ return ocelot_mrp_add_ring_role(ocelot, port, mrp);
+}
+
+static int
+ocelot_port_obj_mrp_del_ring_role(struct net_device *dev,
+ const struct switchdev_obj_ring_role_mrp *mrp)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
+ return ocelot_mrp_del_ring_role(ocelot, port, mrp);
}
static int ocelot_port_obj_add(struct net_device *dev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
int ret = 0;
@@ -964,12 +1065,17 @@ static int ocelot_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
ret = ocelot_port_obj_add_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
- ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj),
- trans);
+ ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_MRP:
+ ret = ocelot_port_obj_mrp_add(dev, SWITCHDEV_OBJ_MRP(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
+ ret = ocelot_port_obj_mrp_add_ring_role(dev,
+ SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
break;
default:
return -EOPNOTSUPP;
@@ -985,12 +1091,19 @@ static int ocelot_port_obj_del(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- ret = ocelot_port_vlan_del_vlan(dev,
- SWITCHDEV_OBJ_PORT_VLAN(obj));
+ ret = ocelot_vlan_vid_del(dev,
+ SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
ret = ocelot_port_obj_del_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj));
break;
+ case SWITCHDEV_OBJ_ID_MRP:
+ ret = ocelot_port_obj_mrp_del(dev, SWITCHDEV_OBJ_MRP(obj));
+ break;
+ case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
+ ret = ocelot_port_obj_mrp_del_ring_role(dev,
+ SWITCHDEV_OBJ_RING_ROLE_MRP(obj));
+ break;
default:
return -EOPNOTSUPP;
}
@@ -998,9 +1111,42 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-static int ocelot_netdevice_port_event(struct net_device *dev,
- unsigned long event,
- struct netdev_notifier_changeupper_info *info)
+static int ocelot_netdevice_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags;
+ int err;
+
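+ /* Joining a bridge: enable address learning and all classes of
+ * flooding on the port, matching the software bridge port defaults.
+ */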
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask;
+
+ err = ocelot_port_bridge_join(ocelot, port, bridge);
+ if (err)
+ return err;
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+
+ return 0;
+}
+
+static int ocelot_netdevice_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge)
+{
+ struct switchdev_brport_flags flags;
+ int err;
+
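+ /* Leaving the bridge: keep flooding enabled for standalone traffic,
+ * but turn off address learning.
+ */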
+ flags.mask = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD;
+ flags.val = flags.mask & ~BR_LEARNING;
+
+ err = ocelot_port_bridge_leave(ocelot, port, bridge);
+
+ ocelot_port_bridge_flags(ocelot, port, flags);
+
+ return err;
+}
+
+static int ocelot_netdevice_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot_port *ocelot_port = &priv->port;
@@ -1008,71 +1154,103 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
int port = priv->chip_port;
int err = 0;
- switch (event) {
- case NETDEV_CHANGEUPPER:
- if (netif_is_bridge_master(info->upper_dev)) {
- if (info->linking) {
- err = ocelot_port_bridge_join(ocelot, port,
- info->upper_dev);
- } else {
- err = ocelot_port_bridge_leave(ocelot, port,
- info->upper_dev);
- }
- }
- if (netif_is_lag_master(info->upper_dev)) {
- if (info->linking)
- err = ocelot_port_lag_join(ocelot, port,
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking) {
+ err = ocelot_netdevice_bridge_join(ocelot, port,
info->upper_dev);
- else
- ocelot_port_lag_leave(ocelot, port,
- info->upper_dev);
+ } else {
+ err = ocelot_netdevice_bridge_leave(ocelot, port,
+ info->upper_dev);
+ }
+ }
+ if (netif_is_lag_master(info->upper_dev)) {
+ if (info->linking) {
+ err = ocelot_port_lag_join(ocelot, port,
+ info->upper_dev,
+ info->upper_info);
+ if (err == -EOPNOTSUPP) {
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Offloading not supported");
+ err = 0;
+ }
+ } else {
+ ocelot_port_lag_leave(ocelot, port,
+ info->upper_dev);
}
- break;
- default:
- break;
}
- return err;
+ return notifier_from_errno(err);
+}
+
+static int
+ocelot_netdevice_lag_changeupper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+ int err = NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, lower, iter) {
+ err = ocelot_netdevice_changeupper(lower, info);
+ if (err)
+ return notifier_from_errno(err);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int
+ocelot_netdevice_changelowerstate(struct net_device *dev,
+ struct netdev_lag_lower_state_info *info)
+{
+ struct ocelot_port_private *priv = netdev_priv(dev);
+ bool is_active = info->link_up && info->tx_enabled;
+ struct ocelot_port *ocelot_port = &priv->port;
+ struct ocelot *ocelot = ocelot_port->ocelot;
+ int port = priv->chip_port;
+
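+ /* Only LAG lowers are of interest, and the hardware only needs to be
+ * updated when the port's tx-active state actually changes.
+ */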
+ if (!ocelot_port->bond)
+ return NOTIFY_DONE;
+
+ if (ocelot_port->lag_tx_active == is_active)
+ return NOTIFY_DONE;
+
+ ocelot_port_lag_change(ocelot, port, is_active);
+
+ return NOTIFY_OK;
}
static int ocelot_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
- struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- int ret = 0;
- if (event == NETDEV_PRECHANGEUPPER &&
- ocelot_netdevice_dev_check(dev) &&
- netif_is_lag_master(info->upper_dev)) {
- struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
- struct netlink_ext_ack *extack;
+ switch (event) {
+ case NETDEV_CHANGEUPPER: {
+ struct netdev_notifier_changeupper_info *info = ptr;
- if (lag_upper_info &&
- lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
- extack = netdev_notifier_info_to_extack(&info->info);
- NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
+ if (ocelot_netdevice_dev_check(dev))
+ return ocelot_netdevice_changeupper(dev, info);
- ret = -EINVAL;
- goto notify;
- }
+ if (netif_is_lag_master(dev))
+ return ocelot_netdevice_lag_changeupper(dev, info);
+
+ break;
}
+ case NETDEV_CHANGELOWERSTATE: {
+ struct netdev_notifier_changelowerstate_info *info = ptr;
- if (netif_is_lag_master(dev)) {
- struct net_device *slave;
- struct list_head *iter;
+ if (!ocelot_netdevice_dev_check(dev))
+ break;
- netdev_for_each_lower_dev(dev, slave, iter) {
- ret = ocelot_netdevice_port_event(slave, event, info);
- if (ret)
- goto notify;
- }
- } else {
- ret = ocelot_netdevice_port_event(dev, event, info);
+ return ocelot_netdevice_changelowerstate(dev,
+ info->lower_state_info);
+ }
+ default:
+ break;
}
-notify:
- return notifier_from_errno(ret);
+ return NOTIFY_DONE;
}
struct notifier_block ocelot_netdevice_nb __read_mostly = {
@@ -1171,7 +1349,19 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
if (err) {
dev_err(ocelot->dev, "register_netdev failed\n");
free_netdev(dev);
+ ocelot->ports[port] = NULL;
+ return err;
}
- return err;
+ return 0;
+}
+
+void ocelot_release_port(struct ocelot_port *ocelot_port)
+{
+ struct ocelot_port_private *priv = container_of(ocelot_port,
+ struct ocelot_port_private,
+ port);
+
+ unregister_netdev(priv->dev);
+ free_netdev(priv->dev);
}
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index d8c778ee6f1b..37a232911395 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -959,6 +959,12 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
list_add(&filter->list, pos->prev);
}
+static bool ocelot_vcap_filter_equal(const struct ocelot_vcap_filter *a,
+ const struct ocelot_vcap_filter *b)
+{
+ return !memcmp(&a->id, &b->id, sizeof(struct ocelot_vcap_id));
+}
+
static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
@@ -966,7 +972,7 @@ static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
int index = 0;
list_for_each_entry(tmp, &block->rules, list) {
- if (filter->id == tmp->id)
+ if (ocelot_vcap_filter_equal(filter, tmp))
return index;
index++;
}
@@ -991,16 +997,19 @@ ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
}
struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id)
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int cookie,
+ bool tc_offload)
{
struct ocelot_vcap_filter *filter;
list_for_each_entry(filter, &block->rules, list)
- if (filter->id == id)
+ if (filter->id.tc_offload == tc_offload &&
+ filter->id.cookie == cookie)
return filter;
return NULL;
}
+EXPORT_SYMBOL(ocelot_vcap_block_find_filter_by_id);
/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
* on destination and source MAC addresses, but only on higher-level protocol
@@ -1150,6 +1159,7 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
vcap_entry_set(ocelot, index, filter);
return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_filter_add);
static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
struct ocelot_vcap_block *block,
@@ -1160,7 +1170,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
list_for_each_safe(pos, q, &block->rules) {
tmp = list_entry(pos, struct ocelot_vcap_filter, list);
- if (tmp->id == filter->id) {
+ if (ocelot_vcap_filter_equal(filter, tmp)) {
if (tmp->block_id == VCAP_IS2 &&
tmp->action.police_ena)
ocelot_vcap_policer_del(ocelot, block,
@@ -1204,6 +1214,7 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
return 0;
}
+EXPORT_SYMBOL(ocelot_vcap_filter_del);
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
index 82fd10581a14..523611ccc48f 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -7,304 +7,13 @@
#define _MSCC_OCELOT_VCAP_H_
#include "ocelot.h"
-#include "ocelot_police.h"
-#include <net/sch_generic.h>
-#include <net/pkt_cls.h>
+#include <soc/mscc/ocelot_vcap.h>
+#include <net/flow_offload.h>
#define OCELOT_POLICER_DISCARD 0x17f
-struct ocelot_ipv4 {
- u8 addr[4];
-};
-
-enum ocelot_vcap_bit {
- OCELOT_VCAP_BIT_ANY,
- OCELOT_VCAP_BIT_0,
- OCELOT_VCAP_BIT_1
-};
-
-struct ocelot_vcap_u8 {
- u8 value[1];
- u8 mask[1];
-};
-
-struct ocelot_vcap_u16 {
- u8 value[2];
- u8 mask[2];
-};
-
-struct ocelot_vcap_u24 {
- u8 value[3];
- u8 mask[3];
-};
-
-struct ocelot_vcap_u32 {
- u8 value[4];
- u8 mask[4];
-};
-
-struct ocelot_vcap_u40 {
- u8 value[5];
- u8 mask[5];
-};
-
-struct ocelot_vcap_u48 {
- u8 value[6];
- u8 mask[6];
-};
-
-struct ocelot_vcap_u64 {
- u8 value[8];
- u8 mask[8];
-};
-
-struct ocelot_vcap_u128 {
- u8 value[16];
- u8 mask[16];
-};
-
-struct ocelot_vcap_vid {
- u16 value;
- u16 mask;
-};
-
-struct ocelot_vcap_ipv4 {
- struct ocelot_ipv4 value;
- struct ocelot_ipv4 mask;
-};
-
-struct ocelot_vcap_udp_tcp {
- u16 value;
- u16 mask;
-};
-
-struct ocelot_vcap_port {
- u8 value;
- u8 mask;
-};
-
-enum ocelot_vcap_key_type {
- OCELOT_VCAP_KEY_ANY,
- OCELOT_VCAP_KEY_ETYPE,
- OCELOT_VCAP_KEY_LLC,
- OCELOT_VCAP_KEY_SNAP,
- OCELOT_VCAP_KEY_ARP,
- OCELOT_VCAP_KEY_IPV4,
- OCELOT_VCAP_KEY_IPV6
-};
-
-struct ocelot_vcap_key_vlan {
- struct ocelot_vcap_vid vid; /* VLAN ID (12 bit) */
- struct ocelot_vcap_u8 pcp; /* PCP (3 bit) */
- enum ocelot_vcap_bit dei; /* DEI */
- enum ocelot_vcap_bit tagged; /* Tagged/untagged frame */
-};
-
-struct ocelot_vcap_key_etype {
- struct ocelot_vcap_u48 dmac;
- struct ocelot_vcap_u48 smac;
- struct ocelot_vcap_u16 etype;
- struct ocelot_vcap_u16 data; /* MAC data */
-};
-
-struct ocelot_vcap_key_llc {
- struct ocelot_vcap_u48 dmac;
- struct ocelot_vcap_u48 smac;
-
- /* LLC header: DSAP at byte 0, SSAP at byte 1, Control at byte 2 */
- struct ocelot_vcap_u32 llc;
-};
-
-struct ocelot_vcap_key_snap {
- struct ocelot_vcap_u48 dmac;
- struct ocelot_vcap_u48 smac;
-
- /* SNAP header: Organization Code at byte 0, Type at byte 3 */
- struct ocelot_vcap_u40 snap;
-};
-
-struct ocelot_vcap_key_arp {
- struct ocelot_vcap_u48 smac;
- enum ocelot_vcap_bit arp; /* Opcode ARP/RARP */
- enum ocelot_vcap_bit req; /* Opcode request/reply */
- enum ocelot_vcap_bit unknown; /* Opcode unknown */
- enum ocelot_vcap_bit smac_match; /* Sender MAC matches SMAC */
- enum ocelot_vcap_bit dmac_match; /* Target MAC matches DMAC */
-
- /**< Protocol addr. length 4, hardware length 6 */
- enum ocelot_vcap_bit length;
-
- enum ocelot_vcap_bit ip; /* Protocol address type IP */
- enum ocelot_vcap_bit ethernet; /* Hardware address type Ethernet */
- struct ocelot_vcap_ipv4 sip; /* Sender IP address */
- struct ocelot_vcap_ipv4 dip; /* Target IP address */
-};
-
-struct ocelot_vcap_key_ipv4 {
- enum ocelot_vcap_bit ttl; /* TTL zero */
- enum ocelot_vcap_bit fragment; /* Fragment */
- enum ocelot_vcap_bit options; /* Header options */
- struct ocelot_vcap_u8 ds;
- struct ocelot_vcap_u8 proto; /* Protocol */
- struct ocelot_vcap_ipv4 sip; /* Source IP address */
- struct ocelot_vcap_ipv4 dip; /* Destination IP address */
- struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
- struct ocelot_vcap_udp_tcp sport; /* UDP/TCP: Source port */
- struct ocelot_vcap_udp_tcp dport; /* UDP/TCP: Destination port */
- enum ocelot_vcap_bit tcp_fin;
- enum ocelot_vcap_bit tcp_syn;
- enum ocelot_vcap_bit tcp_rst;
- enum ocelot_vcap_bit tcp_psh;
- enum ocelot_vcap_bit tcp_ack;
- enum ocelot_vcap_bit tcp_urg;
- enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
- enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
- enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
-};
-
-struct ocelot_vcap_key_ipv6 {
- struct ocelot_vcap_u8 proto; /* IPv6 protocol */
- struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
- struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */
- enum ocelot_vcap_bit ttl; /* TTL zero */
- struct ocelot_vcap_u8 ds;
- struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
- struct ocelot_vcap_udp_tcp sport;
- struct ocelot_vcap_udp_tcp dport;
- enum ocelot_vcap_bit tcp_fin;
- enum ocelot_vcap_bit tcp_syn;
- enum ocelot_vcap_bit tcp_rst;
- enum ocelot_vcap_bit tcp_psh;
- enum ocelot_vcap_bit tcp_ack;
- enum ocelot_vcap_bit tcp_urg;
- enum ocelot_vcap_bit sip_eq_dip; /* SIP equals DIP */
- enum ocelot_vcap_bit sport_eq_dport; /* SPORT equals DPORT */
- enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
-};
-
-enum ocelot_mask_mode {
- OCELOT_MASK_MODE_NONE,
- OCELOT_MASK_MODE_PERMIT_DENY,
- OCELOT_MASK_MODE_POLICY,
- OCELOT_MASK_MODE_REDIRECT,
-};
-
-enum ocelot_es0_tag {
- OCELOT_NO_ES0_TAG,
- OCELOT_ES0_TAG,
- OCELOT_FORCE_PORT_TAG,
- OCELOT_FORCE_UNTAG,
-};
-
-enum ocelot_tag_tpid_sel {
- OCELOT_TAG_TPID_SEL_8021Q,
- OCELOT_TAG_TPID_SEL_8021AD,
-};
-
-struct ocelot_vcap_action {
- union {
- /* VCAP ES0 */
- struct {
- enum ocelot_es0_tag push_outer_tag;
- enum ocelot_es0_tag push_inner_tag;
- enum ocelot_tag_tpid_sel tag_a_tpid_sel;
- int tag_a_vid_sel;
- int tag_a_pcp_sel;
- u16 vid_a_val;
- u8 pcp_a_val;
- u8 dei_a_val;
- enum ocelot_tag_tpid_sel tag_b_tpid_sel;
- int tag_b_vid_sel;
- int tag_b_pcp_sel;
- u16 vid_b_val;
- u8 pcp_b_val;
- u8 dei_b_val;
- };
-
- /* VCAP IS1 */
- struct {
- bool vid_replace_ena;
- u16 vid;
- bool vlan_pop_cnt_ena;
- int vlan_pop_cnt;
- bool pcp_dei_ena;
- u8 pcp;
- u8 dei;
- bool qos_ena;
- u8 qos_val;
- u8 pag_override_mask;
- u8 pag_val;
- };
-
- /* VCAP IS2 */
- struct {
- bool cpu_copy_ena;
- u8 cpu_qu_num;
- enum ocelot_mask_mode mask_mode;
- unsigned long port_mask;
- bool police_ena;
- struct ocelot_policer pol;
- u32 pol_ix;
- };
- };
-};
-
-struct ocelot_vcap_stats {
- u64 bytes;
- u64 pkts;
- u64 used;
-};
-
-enum ocelot_vcap_filter_type {
- OCELOT_VCAP_FILTER_DUMMY,
- OCELOT_VCAP_FILTER_PAG,
- OCELOT_VCAP_FILTER_OFFLOAD,
-};
-
-struct ocelot_vcap_filter {
- struct list_head list;
-
- enum ocelot_vcap_filter_type type;
- int block_id;
- int goto_target;
- int lookup;
- u8 pag;
- u16 prio;
- u32 id;
-
- struct ocelot_vcap_action action;
- struct ocelot_vcap_stats stats;
- /* For VCAP IS1 and IS2 */
- unsigned long ingress_port_mask;
- /* For VCAP ES0 */
- struct ocelot_vcap_port ingress_port;
- struct ocelot_vcap_port egress_port;
-
- enum ocelot_vcap_bit dmac_mc;
- enum ocelot_vcap_bit dmac_bc;
- struct ocelot_vcap_key_vlan vlan;
-
- enum ocelot_vcap_key_type key_type;
- union {
- /* OCELOT_VCAP_KEY_ANY: No specific fields */
- struct ocelot_vcap_key_etype etype;
- struct ocelot_vcap_key_llc llc;
- struct ocelot_vcap_key_snap snap;
- struct ocelot_vcap_key_arp arp;
- struct ocelot_vcap_key_ipv4 ipv4;
- struct ocelot_vcap_key_ipv6 ipv6;
- } key;
-};
-
-int ocelot_vcap_filter_add(struct ocelot *ocelot,
- struct ocelot_vcap_filter *rule,
- struct netlink_ext_ack *extack);
-int ocelot_vcap_filter_del(struct ocelot *ocelot,
- struct ocelot_vcap_filter *rule);
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
-struct ocelot_vcap_filter *
-ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id);
void ocelot_detect_vcap_constants(struct ocelot *ocelot);
int ocelot_vcap_init(struct ocelot *ocelot);
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 9cf2bc5f4289..4bd7e9d9ec61 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -4,6 +4,7 @@
*
* Copyright (c) 2017 Microsemi Corporation
*/
+#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_net.h>
@@ -18,8 +19,6 @@
#include <soc/mscc/ocelot_hsio.h>
#include "ocelot.h"
-#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
-
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
REG(ANA_VLANMASK, 0x009004),
@@ -517,7 +516,6 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
ocelot->map = ocelot_regmap;
ocelot->stats_layout = ocelot_stats_layout;
ocelot->num_stats = ARRAY_SIZE(ocelot_stats_layout);
- ocelot->shared_queue_sz = 224 * 1024;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
@@ -533,181 +531,28 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
return 0;
}
-static int ocelot_parse_ifh(u32 *_ifh, struct frame_info *info)
-{
- u8 llen, wlen;
- u64 ifh[2];
-
- ifh[0] = be64_to_cpu(((__force __be64 *)_ifh)[0]);
- ifh[1] = be64_to_cpu(((__force __be64 *)_ifh)[1]);
-
- wlen = IFH_EXTRACT_BITFIELD64(ifh[0], 7, 8);
- llen = IFH_EXTRACT_BITFIELD64(ifh[0], 15, 6);
-
- info->len = OCELOT_BUFFER_CELL_SZ * wlen + llen - 80;
-
- info->timestamp = IFH_EXTRACT_BITFIELD64(ifh[0], 21, 32);
-
- info->port = IFH_EXTRACT_BITFIELD64(ifh[1], 43, 4);
-
- info->tag_type = IFH_EXTRACT_BITFIELD64(ifh[1], 16, 1);
- info->vid = IFH_EXTRACT_BITFIELD64(ifh[1], 0, 12);
-
- return 0;
-}
-
-static int ocelot_rx_frame_word(struct ocelot *ocelot, u8 grp, bool ifh,
- u32 *rval)
-{
- u32 val;
- u32 bytes_valid;
-
- val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- if (val == XTR_NOT_READY) {
- if (ifh)
- return -EIO;
-
- do {
- val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- } while (val == XTR_NOT_READY);
- }
-
- switch (val) {
- case XTR_ABORT:
- return -EIO;
- case XTR_EOF_0:
- case XTR_EOF_1:
- case XTR_EOF_2:
- case XTR_EOF_3:
- case XTR_PRUNED:
- bytes_valid = XTR_VALID_BYTES(val);
- val = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- if (val == XTR_ESCAPE)
- *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
- else
- *rval = val;
-
- return bytes_valid;
- case XTR_ESCAPE:
- *rval = ocelot_read_rix(ocelot, QS_XTR_RD, grp);
-
- return 4;
- default:
- *rval = val;
-
- return 4;
- }
-}
-
static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
{
struct ocelot *ocelot = arg;
- int i = 0, grp = 0;
- int err = 0;
-
- if (!(ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)))
- return IRQ_NONE;
+ int grp = 0, err;
- do {
- struct skb_shared_hwtstamps *shhwtstamps;
- struct ocelot_port_private *priv;
- struct ocelot_port *ocelot_port;
- u64 tod_in_ns, full_ts_in_ns;
- struct frame_info info = {};
- struct net_device *dev;
- u32 ifh[4], val, *buf;
- struct timespec64 ts;
- int sz, len, buf_len;
+ while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
- for (i = 0; i < OCELOT_TAG_LEN / 4; i++) {
- err = ocelot_rx_frame_word(ocelot, grp, true, &ifh[i]);
- if (err != 4)
- break;
- }
-
- if (err != 4)
- break;
-
- /* At this point the IFH was read correctly, so it is safe to
- * presume that there is no error. The err needs to be reset
- * otherwise a frame could come in CPU queue between the while
- * condition and the check for error later on. And in that case
- * the new frame is just removed and not processed.
- */
- err = 0;
-
- ocelot_parse_ifh(ifh, &info);
-
- ocelot_port = ocelot->ports[info.port];
- priv = container_of(ocelot_port, struct ocelot_port_private,
- port);
- dev = priv->dev;
-
- skb = netdev_alloc_skb(dev, info.len);
-
- if (unlikely(!skb)) {
- netdev_err(dev, "Unable to allocate sk_buff\n");
- err = -ENOMEM;
- break;
- }
- buf_len = info.len - ETH_FCS_LEN;
- buf = (u32 *)skb_put(skb, buf_len);
-
- len = 0;
- do {
- sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
- *buf++ = val;
- len += sz;
- } while (len < buf_len);
-
- /* Read the FCS */
- sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
- /* Update the statistics if part of the FCS was read before */
- len -= ETH_FCS_LEN - sz;
-
- if (unlikely(dev->features & NETIF_F_RXFCS)) {
- buf = (u32 *)skb_put(skb, ETH_FCS_LEN);
- *buf = val;
- }
-
- if (sz < 0) {
- err = sz;
- break;
- }
-
- if (ocelot->ptp) {
- ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
-
- tod_in_ns = ktime_set(ts.tv_sec, ts.tv_nsec);
- if ((tod_in_ns & 0xffffffff) < info.timestamp)
- full_ts_in_ns = (((tod_in_ns >> 32) - 1) << 32) |
- info.timestamp;
- else
- full_ts_in_ns = (tod_in_ns & GENMASK_ULL(63, 32)) |
- info.timestamp;
-
- shhwtstamps = skb_hwtstamps(skb);
- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamps->hwtstamp = full_ts_in_ns;
- }
+ err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
+ if (err)
+ goto out;
- /* Everything we see on an interface that is in the HW bridge
- * has already been forwarded.
- */
- if (ocelot->bridge_mask & BIT(info.port))
- skb->offload_fwd_mark = 1;
+ skb->dev->stats.rx_bytes += skb->len;
+ skb->dev->stats.rx_packets++;
- skb->protocol = eth_type_trans(skb, dev);
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
- dev->stats.rx_bytes += len;
- dev->stats.rx_packets++;
- } while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp));
+ }
- if (err)
- while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
- ocelot_read_rix(ocelot, QS_XTR_RD, grp);
+out:
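+ /* On an extraction error, drain the extraction group so the next IRQ
+ * starts from a clean state, at a frame boundary.
+ */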
+ if (err < 0)
+ ocelot_drain_cpu_queue(ocelot, 0);
return IRQ_HANDLED;
}
@@ -764,9 +609,25 @@ static u16 ocelot_wm_enc(u16 value)
return value;
}
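+
+/* Inverse of ocelot_wm_enc(): when bit 8 is set, the low 8 bits of the
+ * watermark are expressed in units of 16.
+ */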
+static u16 ocelot_wm_dec(u16 wm)
+{
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+
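+/* The watermark status word packs the current occupancy in bits 23:12 and
+ * the maximum occupancy in bits 11:0.
+ */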
+static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
.port_to_netdev = ocelot_port_to_netdev,
.netdev_to_port = ocelot_netdev_to_port,
};
@@ -1036,12 +897,19 @@ static struct ptp_clock_info ocelot_ptp_clock_info = {
.enable = ocelot_ptp_enable,
};
+static void mscc_ocelot_teardown_devlink_ports(struct ocelot *ocelot)
+{
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_port_devlink_teardown(ocelot, port);
+}
+
static void mscc_ocelot_release_ports(struct ocelot *ocelot)
{
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
ocelot_port = ocelot->ports[port];
@@ -1049,12 +917,7 @@ static void mscc_ocelot_release_ports(struct ocelot *ocelot)
continue;
ocelot_deinit_port(ocelot, port);
-
- priv = container_of(ocelot_port, struct ocelot_port_private,
- port);
-
- unregister_netdev(priv->dev);
- free_netdev(priv->dev);
+ ocelot_release_port(ocelot_port);
}
}
@@ -1062,36 +925,55 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
struct device_node *ports)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ u32 devlink_ports_registered = 0;
struct device_node *portnp;
- int err;
+ int port, err;
+ u32 reg;
ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
sizeof(struct ocelot_port *), GFP_KERNEL);
if (!ocelot->ports)
return -ENOMEM;
+ ocelot->devlink_ports = devm_kcalloc(ocelot->dev,
+ ocelot->num_phys_ports,
+ sizeof(*ocelot->devlink_ports),
+ GFP_KERNEL);
+ if (!ocelot->devlink_ports)
+ return -ENOMEM;
+
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
struct device_node *phy_node;
+ struct devlink_port *dlp;
phy_interface_t phy_mode;
struct phy_device *phy;
struct regmap *target;
struct resource *res;
struct phy *serdes;
char res_name[8];
- u32 port;
- if (of_property_read_u32(portnp, "reg", &port))
+ if (of_property_read_u32(portnp, "reg", &reg))
+ continue;
+
+ port = reg;
+ if (port < 0 || port >= ocelot->num_phys_ports) {
+ dev_err(ocelot->dev,
+ "invalid port number: %d >= %d\n", port,
+ ocelot->num_phys_ports);
continue;
+ }
snprintf(res_name, sizeof(res_name), "port%d", port);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
res_name);
target = ocelot_regmap_init(ocelot, res);
- if (IS_ERR(target))
- continue;
+ if (IS_ERR(target)) {
+ err = PTR_ERR(target);
+ goto out_teardown;
+ }
phy_node = of_parse_phandle(portnp, "phy-handle", 0);
if (!phy_node)
@@ -1102,15 +984,25 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
if (!phy)
continue;
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_PHYSICAL);
+ if (err) {
+ of_node_put(portnp);
+ goto out_teardown;
+ }
+ devlink_ports_registered |= BIT(port);
+
err = ocelot_probe_port(ocelot, port, target, phy);
if (err) {
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
ocelot_port = ocelot->ports[port];
priv = container_of(ocelot_port, struct ocelot_port_private,
port);
+ dlp = &ocelot->devlink_ports[port];
+ devlink_port_type_eth_set(dlp, priv->dev);
of_get_phy_mode(portnp, &phy_mode);
@@ -1135,7 +1027,8 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_teardown;
}
serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@@ -1149,13 +1042,36 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
port);
of_node_put(portnp);
- return err;
+ goto out_teardown;
}
priv->serdes = serdes;
}
+ /* Initialize unused devlink ports at the end */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (devlink_ports_registered & BIT(port))
+ continue;
+
+ err = ocelot_port_devlink_init(ocelot, port,
+ DEVLINK_PORT_FLAVOUR_UNUSED);
+ if (err)
+ goto out_teardown;
+
+ devlink_ports_registered |= BIT(port);
+ }
+
return 0;
+
+out_teardown:
+ /* Unregister the network interfaces */
+ mscc_ocelot_release_ports(ocelot);
+ /* Tear down devlink ports for the registered network interfaces */
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ if (devlink_ports_registered & BIT(port))
+ ocelot_port_devlink_teardown(ocelot, port);
+ }
+ return err;
}
static int mscc_ocelot_probe(struct platform_device *pdev)
@@ -1163,6 +1079,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
int err, irq_xtr, irq_ptp_rdy;
struct device_node *ports;
+ struct devlink *devlink;
struct ocelot *ocelot;
struct regmap *hsio;
unsigned int i;
@@ -1186,10 +1103,12 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (!np && !pdev->dev.platform_data)
return -ENODEV;
- ocelot = devm_kzalloc(&pdev->dev, sizeof(*ocelot), GFP_KERNEL);
- if (!ocelot)
+ devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot));
+ if (!devlink)
return -ENOMEM;
+ ocelot = devlink_priv(devlink);
+ ocelot->devlink = priv_to_devlink(ocelot);
platform_set_drvdata(pdev, ocelot);
ocelot->dev = &pdev->dev;
@@ -1206,7 +1125,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->targets[io_target[i].id] = NULL;
continue;
}
- return PTR_ERR(target);
+ err = PTR_ERR(target);
+ goto out_free_devlink;
}
ocelot->targets[io_target[i].id] = target;
@@ -1215,24 +1135,27 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
hsio = syscon_regmap_lookup_by_compatible("mscc,ocelot-hsio");
if (IS_ERR(hsio)) {
dev_err(&pdev->dev, "missing hsio syscon\n");
- return PTR_ERR(hsio);
+ err = PTR_ERR(hsio);
+ goto out_free_devlink;
}
ocelot->targets[HSIO] = hsio;
err = ocelot_chip_init(ocelot, &ocelot_ops);
if (err)
- return err;
+ goto out_free_devlink;
irq_xtr = platform_get_irq_byname(pdev, "xtr");
- if (irq_xtr < 0)
- return -ENODEV;
+ if (irq_xtr < 0) {
+ err = irq_xtr;
+ goto out_free_devlink;
+ }
err = devm_request_threaded_irq(&pdev->dev, irq_xtr, NULL,
ocelot_xtr_irq_handler, IRQF_ONESHOT,
"frame extraction", ocelot);
if (err)
- return err;
+ goto out_free_devlink;
irq_ptp_rdy = platform_get_irq_byname(pdev, "ptp_rdy");
if (irq_ptp_rdy > 0 && ocelot->targets[PTP]) {
@@ -1241,7 +1164,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
IRQF_ONESHOT, "ptp ready",
ocelot);
if (err)
- return err;
+ goto out_free_devlink;
/* Both the PTP interrupt and the PTP bank are available */
ocelot->ptp = 1;
@@ -1250,25 +1173,32 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ports = of_get_child_by_name(np, "ethernet-ports");
if (!ports) {
dev_err(ocelot->dev, "no ethernet-ports child node found\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_free_devlink;
}
ocelot->num_phys_ports = of_get_child_count(ports);
ocelot->num_flooding_pgids = 1;
ocelot->vcap = vsc7514_vcap_props;
- ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
- ocelot->xtr_prefix = OCELOT_TAG_PREFIX_NONE;
ocelot->npi = -1;
err = ocelot_init(ocelot);
if (err)
goto out_put_ports;
- err = mscc_ocelot_init_ports(pdev, ports);
+ err = devlink_register(devlink, ocelot->dev);
if (err)
goto out_ocelot_deinit;
+ err = mscc_ocelot_init_ports(pdev, ports);
+ if (err)
+ goto out_ocelot_devlink_unregister;
+
+ err = ocelot_devlink_sb_register(ocelot);
+ if (err)
+ goto out_ocelot_release_ports;
+
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
if (err) {
@@ -1288,10 +1218,17 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
return 0;
+out_ocelot_release_ports:
+ mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+out_ocelot_devlink_unregister:
+ devlink_unregister(devlink);
out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
+out_free_devlink:
+ devlink_free(devlink);
return err;
}
@@ -1300,11 +1237,15 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
struct ocelot *ocelot = platform_get_drvdata(pdev);
ocelot_deinit_timestamp(ocelot);
+ ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
+ mscc_ocelot_teardown_devlink_ports(ocelot);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
unregister_netdevice_notifier(&ocelot_netdevice_nb);
+ devlink_free(ocelot->devlink);
return 0;
}
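The vsc7514 probe above switches the top-level allocation to devlink_alloc() and threads every failure path through goto labels so that devlink ports, the devlink instance and the private data are torn down in reverse order of creation. A minimal sketch of that ordering, kept to the two-argument devlink_alloc()/devlink_register() calls visible in the diff; my_priv, my_devlink_ops and my_setup_ports() are hypothetical placeholders, not part of the ocelot driver:

/* Sketch only: mirrors the alloc/register/unwind ordering used above. */
#include <linux/platform_device.h>
#include <net/devlink.h>

struct my_priv { int nports; };

static const struct devlink_ops my_devlink_ops;

static int my_setup_ports(struct my_priv *priv)
{
	return 0;	/* would call devlink_port_register() per port */
}

static int my_probe(struct platform_device *pdev)
{
	struct devlink *devlink;
	struct my_priv *priv;
	int err;

	/* driver private data now lives inside the devlink instance */
	devlink = devlink_alloc(&my_devlink_ops, sizeof(*priv));
	if (!devlink)
		return -ENOMEM;
	priv = devlink_priv(devlink);

	err = devlink_register(devlink, &pdev->dev);
	if (err)
		goto out_free;

	err = my_setup_ports(priv);
	if (err)
		goto out_unregister;

	return 0;

out_unregister:
	devlink_unregister(devlink);
out_free:
	devlink_free(devlink);
	return err;
}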
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 0a721f6e8676..e31f8fbbc696 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -3109,13 +3109,19 @@ mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
return 0;
}
-static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, false);
}
-static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
+static int mem_atomic8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
+ if (meta->insn.imm != BPF_ADD)
+ return -EOPNOTSUPP;
+
return mem_xadd(nfp_prog, meta, true);
}
@@ -3475,8 +3481,8 @@ static const instr_cb_t instr_cb[256] = {
[BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
[BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
[BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
- [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
- [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
+ [BPF_STX | BPF_ATOMIC | BPF_W] = mem_atomic4,
+ [BPF_STX | BPF_ATOMIC | BPF_DW] = mem_atomic8,
[BPF_ST | BPF_MEM | BPF_B] = mem_st1,
[BPF_ST | BPF_MEM | BPF_H] = mem_st2,
[BPF_ST | BPF_MEM | BPF_W] = mem_st4,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index fac9c6f9e197..d0e17eebddd9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -428,9 +428,9 @@ static inline bool is_mbpf_classic_store_pkt(const struct nfp_insn_meta *meta)
return is_mbpf_classic_store(meta) && meta->ptr.type == PTR_TO_PACKET;
}
-static inline bool is_mbpf_xadd(const struct nfp_insn_meta *meta)
+static inline bool is_mbpf_atomic(const struct nfp_insn_meta *meta)
{
- return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_XADD);
+ return (meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_ATOMIC);
}
static inline bool is_mbpf_mul(const struct nfp_insn_meta *meta)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
index e92ee510fd52..9d235c0ce46a 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c
@@ -479,7 +479,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
pr_vlog(env, "map writes not supported\n");
return -EOPNOTSUPP;
}
- if (is_mbpf_xadd(meta)) {
+ if (is_mbpf_atomic(meta)) {
err = nfp_bpf_map_mark_used(env, meta, reg,
NFP_MAP_USE_ATOMIC_CNT);
if (err)
@@ -523,12 +523,17 @@ exit_check_ptr:
}
static int
-nfp_bpf_check_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
- struct bpf_verifier_env *env)
+nfp_bpf_check_atomic(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
+ struct bpf_verifier_env *env)
{
const struct bpf_reg_state *sreg = cur_regs(env) + meta->insn.src_reg;
const struct bpf_reg_state *dreg = cur_regs(env) + meta->insn.dst_reg;
+ if (meta->insn.imm != BPF_ADD) {
+ pr_vlog(env, "atomic op not implemented: %d\n", meta->insn.imm);
+ return -EOPNOTSUPP;
+ }
+
if (dreg->type != PTR_TO_MAP_VALUE) {
pr_vlog(env, "atomic add not to a map value pointer: %d\n",
dreg->type);
@@ -655,8 +660,8 @@ int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
if (is_mbpf_store(meta))
return nfp_bpf_check_store(nfp_prog, meta, env);
- if (is_mbpf_xadd(meta))
- return nfp_bpf_check_xadd(nfp_prog, meta, env);
+ if (is_mbpf_atomic(meta))
+ return nfp_bpf_check_atomic(nfp_prog, meta, env);
if (is_mbpf_alu(meta))
return nfp_bpf_check_alu(nfp_prog, meta, env);
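The three nfp/bpf hunks above track the BPF uapi change that folded BPF_STX | BPF_XADD into BPF_STX | BPF_ATOMIC, with the actual operation now carried in insn->imm. Plain BPF_ADD keeps the old fetch-less add semantics; anything else (BPF_ADD | BPF_FETCH, BPF_XCHG, BPF_CMPXCHG, ...) has to be rejected with -EOPNOTSUPP by an offload JIT that only implements the legacy behaviour, which is exactly what mem_atomic4/8 and nfp_bpf_check_atomic() do. A small user-space sketch of that classification, assuming UAPI headers new enough to define BPF_ATOMIC:

/* Sketch: classify a BPF_ATOMIC instruction the way an offload JIT that
 * only implements plain atomic add (the old XADD semantics) would.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>

static bool is_atomic_store(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_STX &&
	       BPF_MODE(insn->code) == BPF_ATOMIC;
}

static int check_atomic(const struct bpf_insn *insn)
{
	if (!is_atomic_store(insn))
		return 0;		/* not an atomic store, nothing to check */
	if (insn->imm != BPF_ADD)	/* FETCH/XCHG/CMPXCHG variants */
		return -EOPNOTSUPP;
	return 0;			/* plain add: same as legacy XADD */
}

int main(void)
{
	struct bpf_insn add = {
		.code = BPF_STX | BPF_ATOMIC | BPF_DW,
		.imm  = BPF_ADD,
	};
	struct bpf_insn xchg = add;

	xchg.imm = BPF_XCHG;
	printf("add: %d, xchg: %d\n", check_atomic(&add), check_atomic(&xchg));
	return 0;
}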
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f21fb573ea3e..eeb30680b4dc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1822,8 +1822,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
rcu_read_lock();
xdp_prog = READ_ONCE(dp->xdp_prog);
true_bufsz = xdp_prog ? PAGE_SIZE : dp->fl_bufsz;
- xdp.frame_sz = PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM;
- xdp.rxq = &rx_ring->xdp_rxq;
+ xdp_init_buff(&xdp, PAGE_SIZE - NFP_NET_RX_BUF_HEADROOM,
+ &rx_ring->xdp_rxq);
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) {
@@ -1914,10 +1914,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
unsigned int dma_off;
int act;
- xdp.data_hard_start = rxbuf->frag + NFP_NET_RX_BUF_HEADROOM;
- xdp.data = orig_data;
- xdp.data_meta = orig_data;
- xdp.data_end = orig_data + pkt_len;
+ xdp_prepare_buff(&xdp,
+ rxbuf->frag + NFP_NET_RX_BUF_HEADROOM,
+ pkt_off - NFP_NET_RX_BUF_HEADROOM,
+ pkt_len, true);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -3656,8 +3656,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
.ndo_get_phys_port_name = nfp_net_get_phys_port_name,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_bpf = nfp_net_xdp,
.ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
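nfp_net_rx() above is converted to the xdp_init_buff()/xdp_prepare_buff() helpers, which replace the open-coded assignments to frame_sz, rxq, data_hard_start, data, data_meta and data_end. The invariant part is set once per poll loop, the per-packet pointers once per frame. A sketch of the pattern (kernel context; the prog, rxq and buffer parameters are hypothetical):

/* Sketch of the helper pattern; callers supply driver-specific values. */
#include <net/xdp.h>
#include <linux/filter.h>

static u32 my_run_xdp(struct bpf_prog *prog, struct xdp_rxq_info *rxq,
		      void *hard_start, unsigned int headroom,
		      unsigned int len, u32 frame_sz)
{
	struct xdp_buff xdp;

	/* frame_sz and rxq are invariant for the whole poll loop */
	xdp_init_buff(&xdp, frame_sz, rxq);
	/* per-packet pointers; last arg says whether data_meta is valid */
	xdp_prepare_buff(&xdp, hard_start, headroom, len, true);

	return bpf_prog_run_xdp(prog, &xdp);
}

The final argument tells the helper whether data_meta is valid: nfp passes true here, while the qede conversion further down passes false and therefore keeps the meta pointer invalid.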
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 9bed42719ae5..563dba384a53 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -249,8 +249,6 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
return (usecs * mult) / div;
}
-typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
-
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index ac4cd5d82e69..162a1ff1e9d2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -979,7 +979,7 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
stats->vlan_inserted++;
}
- if (skb->csum_not_inet)
+ if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
stats->csum++;
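The ionic hunk replaces a direct test of skb->csum_not_inet with the skb_csum_is_sctp() helper, making it explicit that the "not an Internet checksum" case means CRC32c for SCTP. A trivial sketch of the same accounting (kernel context; the counters are hypothetical):

/* Sketch: pick the offload flavour per skb. */
#include <linux/skbuff.h>

static void my_count_csum(struct sk_buff *skb, u64 *crc32_csum, u64 *csum)
{
	if (skb_csum_is_sctp(skb))	/* CRC32c, i.e. SCTP */
		(*crc32_csum)++;
	else				/* regular Internet checksum */
		(*csum)++;
}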
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 5e9f8ee99800..2fcbcecb41d1 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -113,7 +113,8 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
return NX_RCODE_INVALID_ARGS;
}
- addr = pci_zalloc_consistent(adapter->pdev, size, &md_template_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &md_template_addr, GFP_KERNEL);
if (!addr) {
dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
return -ENOMEM;
@@ -133,7 +134,7 @@ netxen_get_minidump_template(struct netxen_adapter *adapter)
dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
cmd.rsp.cmd, size, cmd.rsp.arg2);
}
- pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
+ dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
return 0;
}
@@ -281,14 +282,14 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
rsp_size =
SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
- addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &hostrq_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
- addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &cardrsp_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL) {
err = -ENOMEM;
goto out_free_rq;
@@ -387,9 +388,10 @@ nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
recv_ctx->virt_port = prsp->virt_port;
out_free_rsp:
- pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
}
@@ -429,14 +431,14 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
struct netxen_cmd_args cmd;
rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
- rq_addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &rq_phys_addr);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
- rsp_addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &rsp_phys_addr);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
@@ -491,10 +493,11 @@ nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
err = -EIO;
}
- pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
return err;
}
@@ -745,9 +748,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
recv_ctx = &adapter->recv_ctx;
tx_ring = adapter->tx_ring;
- addr = pci_alloc_consistent(pdev,
- sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
- &recv_ctx->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &recv_ctx->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate hw context\n");
return -ENOMEM;
@@ -762,8 +765,8 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
/* cmd desc ring */
- addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
@@ -776,9 +779,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate rds ring [%d]\n",
@@ -797,9 +800,9 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"%s: failed to allocate sds ring [%d]\n",
@@ -874,19 +877,17 @@ done:
recv_ctx = &adapter->recv_ctx;
if (recv_ctx->hwctx != NULL) {
- pci_free_consistent(adapter->pdev,
- sizeof(struct netxen_ring_ctx) +
- sizeof(uint32_t),
- recv_ctx->hwctx,
- recv_ctx->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ recv_ctx->hwctx, recv_ctx->phys_addr);
recv_ctx->hwctx = NULL;
}
tx_ring = adapter->tx_ring;
if (tx_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- TX_DESC_RINGSIZE(tx_ring),
- tx_ring->desc_head, tx_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
tx_ring->desc_head = NULL;
}
@@ -894,10 +895,10 @@ done:
rds_ring = &recv_ctx->rds_rings[ring];
if (rds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- RCV_DESC_RINGSIZE(rds_ring),
- rds_ring->desc_head,
- rds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
rds_ring->desc_head = NULL;
}
}
@@ -906,10 +907,10 @@ done:
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
- STATUS_DESC_RINGSIZE(sds_ring),
- sds_ring->desc_head,
- sds_ring->phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
sds_ring->desc_head = NULL;
}
}
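The netxen context-setup code above moves from the deprecated pci_alloc_consistent()/pci_free_consistent() wrappers to dma_alloc_coherent()/dma_free_coherent() on &pdev->dev; the old wrappers implied GFP_ATOMIC, so the conversion also makes the now-GFP_KERNEL allocation context explicit. A sketch of the before/after pattern with a hypothetical ring structure:

/* Sketch of the replacement pattern; my_ring is hypothetical. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct my_ring {
	void		*desc;
	dma_addr_t	phys;
	size_t		size;
};

static int my_ring_alloc(struct pci_dev *pdev, struct my_ring *ring,
			 size_t ring_size)
{
	ring->size = ring_size;
	/* old: pci_alloc_consistent(pdev, size, &phys), GFP_ATOMIC implied */
	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size,
					&ring->phys, GFP_KERNEL);
	return ring->desc ? 0 : -ENOMEM;
}

static void my_ring_free(struct pci_dev *pdev, struct my_ring *ring)
{
	/* old: pci_free_consistent(pdev, size, desc, phys) */
	dma_free_coherent(&pdev->dev, ring->size, ring->desc, ring->phys);
	ring->desc = NULL;
}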
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 94546ed5f867..08f9477d2ee8 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -102,10 +102,8 @@ void netxen_release_rx_buffers(struct netxen_adapter *adapter)
rx_buf = &(rds_ring->rx_buf_arr[i]);
if (rx_buf->state == NETXEN_BUFFER_FREE)
continue;
- pci_unmap_single(adapter->pdev,
- rx_buf->dma,
- rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
+ rds_ring->dma_size, DMA_FROM_DEVICE);
if (rx_buf->skb != NULL)
dev_kfree_skb_any(rx_buf->skb);
}
@@ -124,16 +122,16 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
if (buffrag->dma) {
- pci_unmap_single(adapter->pdev, buffrag->dma,
- buffrag->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
+ buffrag->length, DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
- pci_unmap_page(adapter->pdev, buffrag->dma,
- buffrag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ buffrag->dma, buffrag->length,
+ DMA_TO_DEVICE);
buffrag->dma = 0ULL;
}
}
@@ -1250,9 +1248,10 @@ int netxen_init_dummy_dma(struct netxen_adapter *adapter)
if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
- adapter->dummy_dma.addr = pci_alloc_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- &adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ &adapter->dummy_dma.phys_addr,
+ GFP_KERNEL);
if (adapter->dummy_dma.addr == NULL) {
dev_err(&adapter->pdev->dev,
"ERROR: Could not allocate dummy DMA memory\n");
@@ -1304,10 +1303,10 @@ void netxen_free_dummy_dma(struct netxen_adapter *adapter)
}
if (i) {
- pci_free_consistent(adapter->pdev,
- NETXEN_HOST_DUMMY_DMA_SIZE,
- adapter->dummy_dma.addr,
- adapter->dummy_dma.phys_addr);
+ dma_free_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
adapter->dummy_dma.addr = NULL;
} else
dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
@@ -1467,10 +1466,10 @@ netxen_alloc_rx_skb(struct netxen_adapter *adapter,
if (!adapter->ahw.cut_through)
skb_reserve(skb, 2);
- dma = pci_map_single(pdev, skb->data,
- rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+ dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(pdev, dma)) {
+ if (dma_mapping_error(&pdev->dev, dma)) {
dev_kfree_skb_any(skb);
buffer->skb = NULL;
return 1;
@@ -1491,8 +1490,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
buffer = &rds_ring->rx_buf_arr[index];
- pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
skb = buffer->skb;
if (!skb)
@@ -1754,13 +1753,13 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
buffer = &tx_ring->cmd_buf_arr[sw_consumer];
if (buffer->skb) {
frag = &buffer->frag_array[0];
- pci_unmap_single(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, frag->dma, frag->length,
+ DMA_TO_DEVICE);
frag->dma = 0ULL;
for (i = 1; i < buffer->frag_count; i++) {
frag++; /* Get the next frag */
- pci_unmap_page(pdev, frag->dma, frag->length,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, frag->dma,
+ frag->length, DMA_TO_DEVICE);
frag->dma = 0ULL;
}
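The buffer-handling hunks above make the matching change for streaming mappings: pci_map_single()/pci_unmap_single() with PCI_DMA_* directions become dma_map_single()/dma_unmap_single() with DMA_* directions, and mapping failures are detected with dma_mapping_error(). A sketch with hypothetical rx-buffer helpers:

/* Sketch of the streaming-DMA replacement pattern. */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

static int my_map_rx_skb(struct device *dev, struct sk_buff *skb,
			 unsigned int len, dma_addr_t *dma)
{
	/* old: pci_map_single(pdev, data, len, PCI_DMA_FROMDEVICE) */
	*dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma))	/* old: pci_dma_mapping_error() */
		return -ENOMEM;
	return 0;
}

static void my_unmap_rx_skb(struct device *dev, dma_addr_t dma,
			    unsigned int len)
{
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}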
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index d258e0ccf946..7e6bac85495d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -243,8 +243,8 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter)
cmask = mask;
}
- if (pci_set_dma_mask(pdev, mask) == 0 &&
- pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ if (dma_set_mask(&pdev->dev, mask) == 0 &&
+ dma_set_coherent_mask(&pdev->dev, cmask) == 0) {
adapter->pci_using_dac = 1;
return 0;
}
@@ -277,13 +277,13 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
mask = DMA_BIT_MASK(32+shift);
- err = pci_set_dma_mask(pdev, mask);
+ err = dma_set_mask(&pdev->dev, mask);
if (err)
goto err_out;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- err = pci_set_consistent_dma_mask(pdev, mask);
+ err = dma_set_coherent_mask(&pdev->dev, mask);
if (err)
goto err_out;
}
@@ -293,8 +293,8 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
return 0;
err_out:
- pci_set_dma_mask(pdev, old_mask);
- pci_set_consistent_dma_mask(pdev, old_cmask);
+ dma_set_mask(&pdev->dev, old_mask);
+ dma_set_coherent_mask(&pdev->dev, old_cmask);
return err;
}
@@ -1978,9 +1978,9 @@ netxen_map_tx_skb(struct pci_dev *pdev,
nr_frags = skb_shinfo(skb)->nr_frags;
nf = &pbuf->frag_array[0];
- map = pci_map_single(pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, map))
+ map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
goto out_err;
nf->dma = map;
@@ -2004,12 +2004,12 @@ netxen_map_tx_skb(struct pci_dev *pdev,
unwind:
while (--i >= 0) {
nf = &pbuf->frag_array[i+1];
- pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
nf->dma = 0ULL;
}
nf = &pbuf->frag_array[0];
- pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
nf->dma = 0ULL;
out_err:
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 3efc5899f656..2e62a2c4eb63 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -168,6 +168,12 @@ struct qede_dump_info {
u32 args[QEDE_DUMP_MAX_ARGS];
};
+struct qede_coalesce {
+ bool isvalid;
+ u16 rxc;
+ u16 txc;
+};
+
struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
@@ -194,6 +200,7 @@ struct qede_dev {
((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
struct qede_fastpath *fp_array;
+ struct qede_coalesce *coal_entry;
u8 req_num_tx;
u8 fp_num_tx;
u8 req_num_rx;
@@ -581,6 +588,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f);
void qede_forced_speed_maps_init(void);
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal);
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index bedbb85a179a..1560ad3d9290 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -819,8 +819,7 @@ out:
return rc;
}
-static int qede_set_coalesce(struct net_device *dev,
- struct ethtool_coalesce *coal)
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
struct qede_dev *edev = netdev_priv(dev);
struct qede_fastpath *fp;
@@ -855,6 +854,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set RX coalesce error, rc = %d\n", rc);
return rc;
}
+ edev->coal_entry[i].rxc = rxc;
+ edev->coal_entry[i].isvalid = true;
}
if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -874,6 +875,8 @@ static int qede_set_coalesce(struct net_device *dev,
"Set TX coalesce error, rc = %d\n", rc);
return rc;
}
+ edev->coal_entry[i].txc = txc;
+ edev->coal_entry[i].isvalid = true;
}
}
@@ -2105,6 +2108,129 @@ err:
return rc;
}
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rxc, txc;
+ int rc = 0;
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+ : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ rxc, 0,
+ fp->rxq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set RX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].rxc = rxc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ 0, txc,
+ fp->txq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set TX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].txc = txc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+out:
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+static int qede_get_per_coalesce(struct net_device *dev,
+ u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ void *rx_handle = NULL, *tx_handle = NULL;
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rx_coal, tx_coal;
+ int rc = 0;
+
+ rx_coal = QED_DEFAULT_RX_USECS;
+ tx_coal = QED_DEFAULT_TX_USECS;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ rx_handle = fp->rxq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &rx_coal,
+ rx_handle);
+ if (rc) {
+ DP_INFO(edev, "Read Rx coalesce error\n");
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+ if (fp->type & QEDE_FASTPATH_TX)
+ tx_handle = fp->txq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &tx_coal,
+ tx_handle);
+ if (rc)
+ DP_INFO(edev, "Read Tx coalesce error\n");
+
+out:
+ __qede_unlock(edev);
+
+ coal->rx_coalesce_usecs = rx_coal;
+ coal->tx_coalesce_usecs = tx_coal;
+
+ return rc;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_link_ksettings = qede_get_link_ksettings,
@@ -2148,6 +2274,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_fecparam = qede_set_fecparam,
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
.flash_device = qede_flash_device,
.get_dump_flag = qede_get_dump_flag,
.get_dump_data = qede_get_dump_data,
@@ -2177,6 +2305,8 @@ static const struct ethtool_ops qede_vf_ethtool_ops = {
.set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
.get_tunable = qede_get_tunable,
.set_tunable = qede_set_tunable,
};
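qede now wires .get_per_queue_coalesce/.set_per_queue_coalesce into both ethtool_ops tables and caches the last programmed values per queue in edev->coal_entry so they can be re-applied after an internal reload. A stripped-down sketch of the callback shape, with a hypothetical private struct and without the real set_coalesce plumbing:

/* Sketch: per-queue coalesce callback and ops wiring. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct my_priv {
	struct { u16 rxc, txc; bool valid; } *coal;	/* one entry per queue */
	u32 num_queues;
};

static int my_set_per_queue_coalesce(struct net_device *dev, u32 queue,
				     struct ethtool_coalesce *coal)
{
	struct my_priv *p = netdev_priv(dev);

	if (queue >= p->num_queues)
		return -EINVAL;

	/* a real driver would program rx/tx usecs into the queue here */
	p->coal[queue].rxc = coal->rx_coalesce_usecs;
	p->coal[queue].txc = coal->tx_coalesce_usecs;
	p->coal[queue].valid = true;	/* re-applied after reload */
	return 0;
}

static const struct ethtool_ops my_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.set_per_queue_coalesce	   = my_set_per_queue_coalesce,
};

Recent ethtool releases reach these callbacks through the --per-queue option.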
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index ca0ee29a57b5..8c47a9d2a965 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1090,12 +1090,9 @@ static bool qede_rx_xdp(struct qede_dev *edev,
struct xdp_buff xdp;
enum xdp_action act;
- xdp.data_hard_start = page_address(bd->data);
- xdp.data = xdp.data_hard_start + *data_offset;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + *len;
- xdp.rxq = &rxq->xdp_rxq;
- xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
+ xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+ *len, false);
/* Queues always have a full reset currently, so for the time
* being until there's atomic program replace just mark read
@@ -1453,7 +1450,8 @@ int qede_poll(struct napi_struct *napi, int budget)
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
- if (rx_work_done < budget) {
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
napi_complete_done(napi, rx_work_done);
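The qede_fp.c hunk lets the poll routine reach its completion check even when netpoll invokes it with a budget of 0, in which case the RX loop is skipped entirely. A sketch that mirrors that shape; the my_* fastpath type and hooks are hypothetical stand-ins:

/* Sketch mirroring the completion logic above. */
#include <linux/netdevice.h>

struct my_fp {
	struct napi_struct napi;
};

static void my_clean_tx(struct my_fp *fp) { }
static int my_rx_poll(struct my_fp *fp, int budget) { return 0; }
static bool my_has_more_work(struct my_fp *fp) { return false; }
static void my_enable_irq(struct my_fp *fp) { }

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_fp *fp = container_of(napi, struct my_fp, napi);
	int work_done = 0;

	my_clean_tx(fp);		/* TX completions are not budgeted */

	if (budget)
		work_done = my_rx_poll(fp, budget);

	/* budget == 0 (netpoll) must still be able to complete NAPI */
	if (work_done < budget || !budget) {
		if (!my_has_more_work(fp)) {
			napi_complete_done(napi, work_done);
			my_enable_irq(fp);
		} else {
			work_done = budget;
		}
	}

	return work_done;
}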
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9cf960a6d007..4d952036ba82 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -663,8 +663,6 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
#ifdef CONFIG_RFS_ACCEL
@@ -688,8 +686,6 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
};
@@ -707,8 +703,6 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_fix_features = qede_fix_features,
.ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qede_features_check,
.ndo_bpf = qede_xdp,
.ndo_xdp_xmit = qede_xdp_transmit,
@@ -910,6 +904,7 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
{
u8 fp_combined, fp_rx = edev->fp_num_rx;
struct qede_fastpath *fp;
+ void *mem;
int i;
edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
@@ -919,6 +914,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
+ mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
+ sizeof(*edev->coal_entry), GFP_KERNEL);
+ if (!mem) {
+ DP_ERR(edev, "coalesce entry allocation failed\n");
+ kfree(edev->coal_entry);
+ goto err;
+ }
+ edev->coal_entry = mem;
+
fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
/* Allocate the FP elements for Rx queues followed by combined and then
@@ -1326,8 +1330,10 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
* [e.g., QED register callbacks] won't break anything when
* accessing the netdevice.
*/
- if (mode != QEDE_REMOVE_RECOVERY)
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ kfree(edev->coal_entry);
free_netdev(ndev);
+ }
dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}
@@ -2334,8 +2340,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
bool is_locked)
{
struct qed_link_params link_params;
+ struct ethtool_coalesce coal = {};
u8 num_tc;
- int rc;
+ int rc, i;
DP_INFO(edev, "Starting qede load\n");
@@ -2396,6 +2403,18 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
edev->state = QEDE_STATE_OPEN;
+ coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
+ coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
+
+ for_each_queue(i) {
+ if (edev->coal_entry[i].isvalid) {
+ coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
+ coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
+ }
+ __qede_unlock(edev);
+ qede_set_per_coalesce(edev->ndev, i, &coal);
+ __qede_lock(edev);
+ }
DP_INFO(edev, "Ending successfully qede load\n");
goto out;
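qede_alloc_fp_array() above grows the coalesce cache with krealloc(); since krealloc() leaves the original buffer untouched when it fails, the error path has to free the old allocation itself, which is what the kfree(edev->coal_entry) before the goto does. The idiom in isolation:

/* Sketch of the krealloc() error-handling idiom used above. */
#include <linux/slab.h>

static int my_resize(void **bufp, size_t new_size)
{
	void *mem = krealloc(*bufp, new_size, GFP_KERNEL);

	if (!mem) {
		kfree(*bufp);	/* krealloc() keeps the old buffer on failure */
		*bufp = NULL;
		return -ENOMEM;
	}
	*bufp = mem;
	return 0;
}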
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 27740c027681..214e347097a7 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -315,12 +315,11 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
* buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1802,13 +1801,12 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
* first buffer
*/
skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
+ map = dma_map_single(&qdev->pdev->dev,
lrg_buf_cb->skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -1943,18 +1941,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
goto invalid_seg_count;
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
tx_cb->seg_count--;
if (tx_cb->seg_count) {
for (i = 1; i < tx_cb->seg_count; i++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[i],
- mapaddr),
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[i], mapaddr),
dma_unmap_len(&tx_cb->map[i], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
}
qdev->ndev->stats.tx_packets++;
@@ -2021,10 +2017,9 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
qdev->ndev->stats.rx_bytes += length;
skb_put(skb, length);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb->data);
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, qdev->ndev);
@@ -2067,10 +2062,9 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
skb2 = lrg_buf_cb2->skb;
skb_put(skb2, length); /* Just the second buffer length here. */
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb2, mapaddr),
- dma_unmap_len(lrg_buf_cb2, maplen),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
prefetch(skb2->data);
skb_checksum_none_assert(skb2);
@@ -2319,9 +2313,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
/*
* Map the skb buffer first.
*/
- map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
err);
@@ -2357,11 +2351,11 @@ static int ql_send_map(struct ql3_adapter *qdev,
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- map = pci_map_single(qdev->pdev, oal,
+ map = dma_map_single(&qdev->pdev->dev, oal,
sizeof(struct oal),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping outbound address list with error: %d\n",
@@ -2423,24 +2417,24 @@ map_error:
(seg == 7 && seg_cnt > 8) ||
(seg == 12 && seg_cnt > 13) ||
(seg == 17 && seg_cnt > 18)) {
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[seg], mapaddr),
- dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ DMA_TO_DEVICE);
oal++;
seg++;
}
- pci_unmap_page(qdev->pdev,
+ dma_unmap_page(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[seg], mapaddr),
dma_unmap_len(&tx_cb->map[seg], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(&tx_cb->map[0], mapaddr),
dma_unmap_addr(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return NETDEV_TX_BUSY;
@@ -2525,9 +2519,8 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
wmb();
qdev->req_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->req_q_size,
- &qdev->req_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ &qdev->req_q_phy_addr, GFP_KERNEL);
if ((qdev->req_q_virt_addr == NULL) ||
LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
@@ -2536,16 +2529,14 @@ static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
}
qdev->rsp_q_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- (size_t) qdev->rsp_q_size,
- &qdev->rsp_q_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr, GFP_KERNEL);
if ((qdev->rsp_q_virt_addr == NULL) ||
LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
netdev_err(qdev->ndev, "rspQ allocation failed\n");
- pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
- qdev->req_q_virt_addr,
- qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
return -ENOMEM;
}
@@ -2561,15 +2552,13 @@ static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
return;
}
- pci_free_consistent(qdev->pdev,
- qdev->req_q_size,
- qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
qdev->req_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->rsp_q_size,
- qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
qdev->rsp_q_virt_addr = NULL;
@@ -2593,9 +2582,9 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
return -ENOMEM;
qdev->lrg_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- &qdev->lrg_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "lBufQ failed\n");
@@ -2613,15 +2602,16 @@ static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
qdev->small_buf_q_alloc_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- &qdev->small_buf_q_alloc_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
if (qdev->small_buf_q_alloc_virt_addr == NULL) {
netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
- pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
return -ENOMEM;
}
@@ -2638,17 +2628,15 @@ static void ql_free_buffer_queues(struct ql3_adapter *qdev)
return;
}
kfree(qdev->lrg_buf);
- pci_free_consistent(qdev->pdev,
- qdev->lrg_buf_q_alloc_size,
- qdev->lrg_buf_q_alloc_virt_addr,
- qdev->lrg_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
qdev->lrg_buf_q_virt_addr = NULL;
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_q_alloc_size,
- qdev->small_buf_q_alloc_virt_addr,
- qdev->small_buf_q_alloc_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
qdev->small_buf_q_virt_addr = NULL;
@@ -2666,9 +2654,9 @@ static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
QL_SMALL_BUFFER_SIZE);
qdev->small_buf_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- &qdev->small_buf_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr, GFP_KERNEL);
if (qdev->small_buf_virt_addr == NULL) {
netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
@@ -2701,10 +2689,10 @@ static void ql_free_small_buffers(struct ql3_adapter *qdev)
return;
}
if (qdev->small_buf_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- qdev->small_buf_total_size,
- qdev->small_buf_virt_addr,
- qdev->small_buf_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
qdev->small_buf_virt_addr = NULL;
}
@@ -2719,10 +2707,10 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
lrg_buf_cb = &qdev->lrg_buf[i];
if (lrg_buf_cb->skb) {
dev_kfree_skb(lrg_buf_cb->skb);
- pci_unmap_single(qdev->pdev,
+ dma_unmap_single(&qdev->pdev->dev,
dma_unmap_addr(lrg_buf_cb, mapaddr),
dma_unmap_len(lrg_buf_cb, maplen),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
} else {
break;
@@ -2774,13 +2762,11 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
* buffer
*/
skb_reserve(skb, QL_HEADER_SPACE);
- map = pci_map_single(qdev->pdev,
- skb->data,
- qdev->lrg_buffer_len -
- QL_HEADER_SPACE,
- PCI_DMA_FROMDEVICE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
- err = pci_dma_mapping_error(qdev->pdev, map);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
if (err) {
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
@@ -2865,8 +2851,8 @@ static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
* Network Completion Queue Producer Index Register
*/
qdev->shadow_reg_virt_addr =
- pci_alloc_consistent(qdev->pdev,
- PAGE_SIZE, &qdev->shadow_reg_phy_addr);
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->shadow_reg_phy_addr, GFP_KERNEL);
if (qdev->shadow_reg_virt_addr != NULL) {
qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
@@ -2921,10 +2907,9 @@ err_small_buffers:
err_buffer_queues:
ql_free_net_req_rsp_queues(qdev);
err_req_rsp:
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
return -ENOMEM;
}
@@ -2937,10 +2922,9 @@ static void ql_free_mem_resources(struct ql3_adapter *qdev)
ql_free_buffer_queues(qdev);
ql_free_net_req_rsp_queues(qdev);
if (qdev->shadow_reg_virt_addr != NULL) {
- pci_free_consistent(qdev->pdev,
- PAGE_SIZE,
- qdev->shadow_reg_virt_addr,
- qdev->shadow_reg_phy_addr);
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
qdev->shadow_reg_virt_addr = NULL;
}
}
@@ -3641,18 +3625,15 @@ static void ql_reset_work(struct work_struct *work)
if (tx_cb->skb) {
netdev_printk(KERN_DEBUG, ndev,
"Freeing lost SKB\n");
- pci_unmap_single(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[0],
- mapaddr),
- dma_unmap_len(&tx_cb->map[0], maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ DMA_TO_DEVICE);
for (j = 1; j < tx_cb->seg_count; j++) {
- pci_unmap_page(qdev->pdev,
- dma_unmap_addr(&tx_cb->map[j],
- mapaddr),
- dma_unmap_len(&tx_cb->map[j],
- maplen),
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[j], mapaddr),
+ dma_unmap_len(&tx_cb->map[j], maplen),
+ DMA_TO_DEVICE);
}
dev_kfree_skb(tx_cb->skb);
tx_cb->skb = NULL;
@@ -3784,13 +3765,10 @@ static int ql3xxx_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
pci_using_dac = 1;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ else if (!(err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))))
pci_using_dac = 0;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- }
if (err) {
pr_err("%s no usable DMA configuration\n", pci_name(pdev));
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index c2faf96fcade..96b947fde646 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -520,8 +520,6 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_features_check = qlcnic_features_check,
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 3d7d3ab383f8..3d00b3232308 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -183,6 +183,11 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
if (!skb)
goto done;
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ goto done;
+ }
+
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
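rmnet_rx_handler() now linearizes the skb before the rest of the handler parses it, dropping the packet if that fails. skb_linearize() returns non-zero when the reallocation fails, so the guard looks like:

/* Sketch of the linearize-or-drop guard added above. */
#include <linux/skbuff.h>

static struct sk_buff *my_linearize_or_drop(struct sk_buff *skb)
{
	if (skb_linearize(skb)) {	/* non-zero: allocation failed */
		kfree_skb(skb);
		return NULL;		/* caller must stop processing */
	}
	return skb;
}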
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 7be86ef5a584..2728df46ec41 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -63,6 +63,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_50,
RTL_GIGA_MAC_VER_51,
RTL_GIGA_MAC_VER_52,
+ RTL_GIGA_MAC_VER_53,
RTL_GIGA_MAC_VER_60,
RTL_GIGA_MAC_VER_61,
RTL_GIGA_MAC_VER_63,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0d78408b4e26..0a20dae32184 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -28,6 +28,7 @@
#include <linux/bitfield.h>
#include <linux/prefetch.h>
#include <linux/ipv6.h>
+#include <asm/unaligned.h>
#include <net/ip6_checksum.h>
#include "r8169.h"
@@ -145,6 +146,7 @@ static const struct {
[RTL_GIGA_MAC_VER_50] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_51] = {"RTL8168ep/8111ep" },
[RTL_GIGA_MAC_VER_52] = {"RTL8168fp/RTL8117", FIRMWARE_8168FP_3},
+ [RTL_GIGA_MAC_VER_53] = {"RTL8168fp/RTL8117", },
[RTL_GIGA_MAC_VER_60] = {"RTL8125A" },
[RTL_GIGA_MAC_VER_61] = {"RTL8125A", FIRMWARE_8125A_3},
/* reserve 62 for CFG_METHOD_4 in the vendor driver */
@@ -260,6 +262,9 @@ enum rtl8168_8101_registers {
#define CSIAR_BYTE_ENABLE 0x0000f000
#define CSIAR_ADDR_MASK 0x00000fff
PMCH = 0x6f,
+#define D3COLD_NO_PLL_DOWN BIT(7)
+#define D3HOT_NO_PLL_DOWN BIT(6)
+#define D3_NO_PLL_DOWN (BIT(7) | BIT(6))
EPHYAR = 0x80,
#define EPHYAR_FLAG 0x80000000
#define EPHYAR_WRITE_CMD 0x80000000
@@ -529,6 +534,9 @@ enum rtl_rx_desc_bit {
IPFail = (1 << 16), /* IP checksum failed */
UDPFail = (1 << 15), /* UDP/IP checksum failed */
TCPFail = (1 << 14), /* TCP/IP checksum failed */
+
+#define RxCSFailMask (IPFail | UDPFail | TCPFail)
+
RxVlanTag = (1 << 16), /* VLAN tag available */
};
@@ -584,6 +592,12 @@ enum rtl_flag {
RTL_FLAG_MAX
};
+enum rtl_dash_type {
+ RTL_DASH_NONE,
+ RTL_DASH_DP,
+ RTL_DASH_EP,
+};
+
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
struct pci_dev *pci_dev;
@@ -591,6 +605,7 @@ struct rtl8169_private {
struct phy_device *phydev;
struct napi_struct napi;
enum mac_version mac_version;
+ enum rtl_dash_type dash_type;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_tx;
@@ -682,7 +697,7 @@ static bool rtl_is_8168evl_up(struct rtl8169_private *tp)
{
return tp->mac_version >= RTL_GIGA_MAC_VER_34 &&
tp->mac_version != RTL_GIGA_MAC_VER_39 &&
- tp->mac_version <= RTL_GIGA_MAC_VER_52;
+ tp->mac_version <= RTL_GIGA_MAC_VER_53;
}
static bool rtl_supports_eee(struct rtl8169_private *tp)
@@ -746,14 +761,77 @@ static const struct rtl_cond name = { \
\
static bool name ## _check(struct rtl8169_private *tp)
-static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
+static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
{
- if (reg & 0xffff0001) {
- if (net_ratelimit())
- netdev_err(tp->dev, "Invalid ocp reg %x!\n", reg);
- return true;
- }
- return false;
+ /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
+ if (type == ERIAR_OOB &&
+ (tp->mac_version == RTL_GIGA_MAC_VER_52 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_53))
+ *cmd |= 0x7f0 << 18;
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
+}
+
+static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
+{
+ u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
+
+ if (WARN(addr & 3 || !mask, "addr: 0x%x, mask: 0x%08x\n", addr, mask))
+ return;
+
+ RTL_W32(tp, ERIDR, val);
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
+}
+
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val)
+{
+ _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
+}
+
+static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
+{
+ u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
+
+ r8168fp_adjust_ocp_cmd(tp, &cmd, type);
+ RTL_W32(tp, ERIAR, cmd);
+
+ return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(tp, ERIDR) : ~0;
+}
+
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
+{
+ return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
+}
+
+static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
+{
+ u32 val = rtl_eri_read(tp, addr);
+
+ rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
+}
+
+static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
+{
+ rtl_w0w1_eri(tp, addr, p, 0);
+}
+
+static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
+{
+ rtl_w0w1_eri(tp, addr, 0, m);
+}
+
+static bool rtl_ocp_reg_failure(u32 reg)
+{
+ return WARN_ONCE(reg & 0xffff0001, "Invalid ocp reg %x!\n", reg);
}
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
@@ -763,7 +841,7 @@ DECLARE_RTL_COND(rtl_ocp_gphy_cond)
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
@@ -773,7 +851,7 @@ static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, GPHY_OCP, reg << 15);
@@ -784,7 +862,7 @@ static int r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return;
RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
@@ -792,7 +870,7 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- if (rtl_ocp_reg_failure(tp, reg))
+ if (rtl_ocp_reg_failure(reg))
return 0;
RTL_W32(tp, OCPDR, reg << 15);
@@ -808,6 +886,25 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
}
+/* Work around a hw issue with RTL8168g PHY, the quirk disables
+ * PHY MCU interrupts before PHY power-down.
+ */
+static void rtl8168g_phy_suspend_quirk(struct rtl8169_private *tp, int value)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ case RTL_GIGA_MAC_VER_49:
+ if (value & BMCR_RESET || !(value & BMCR_PDOWN))
+ rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
+ else
+ rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
+ break;
+ default:
+ break;
+ }
+};
+
static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
if (reg == 0x1f) {
@@ -818,6 +915,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
if (tp->ocp_base != OCP_STD_PHY_BASE)
reg -= 0x10;
+ if (tp->ocp_base == OCP_STD_PHY_BASE && reg == MII_BMCR)
+ rtl8168g_phy_suspend_quirk(tp, value);
+
r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
}
@@ -1009,70 +1109,6 @@ static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type)
-{
- /* based on RTL8168FP_OOBMAC_BASE in vendor driver */
- if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB)
- *cmd |= 0x7f0 << 18;
-}
-
-DECLARE_RTL_COND(rtl_eriar_cond)
-{
- return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
-}
-
-static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val, int type)
-{
- u32 cmd = ERIAR_WRITE_CMD | type | mask | addr;
-
- BUG_ON((addr & 3) || (mask == 0));
- RTL_W32(tp, ERIDR, val);
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- rtl_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
-}
-
-static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
- u32 val)
-{
- _rtl_eri_write(tp, addr, mask, val, ERIAR_EXGMAC);
-}
-
-static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
-{
- u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr;
-
- r8168fp_adjust_ocp_cmd(tp, &cmd, type);
- RTL_W32(tp, ERIAR, cmd);
-
- return rtl_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
- RTL_R32(tp, ERIDR) : ~0;
-}
-
-static u32 rtl_eri_read(struct rtl8169_private *tp, int addr)
-{
- return _rtl_eri_read(tp, addr, ERIAR_EXGMAC);
-}
-
-static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 p, u32 m)
-{
- u32 val = rtl_eri_read(tp, addr);
-
- rtl_eri_write(tp, addr, ERIAR_MASK_1111, (val & ~m) | p);
-}
-
-static void rtl_eri_set_bits(struct rtl8169_private *tp, int addr, u32 p)
-{
- rtl_w0w1_eri(tp, addr, p, 0);
-}
-
-static void rtl_eri_clear_bits(struct rtl8169_private *tp, int addr, u32 m)
-{
- rtl_w0w1_eri(tp, addr, 0, m);
-}
-
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u16 reg)
{
RTL_W32(tp, OCPAR, 0x0fu << 12 | (reg & 0x0fff));
@@ -1158,19 +1194,10 @@ static void rtl8168ep_driver_start(struct rtl8169_private *tp)
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_start(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_start(tp);
- break;
- default:
- BUG();
- break;
- }
}
static void rtl8168dp_driver_stop(struct rtl8169_private *tp)
@@ -1189,44 +1216,52 @@ static void rtl8168ep_driver_stop(struct rtl8169_private *tp)
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
+ if (tp->dash_type == RTL_DASH_DP)
rtl8168dp_driver_stop(tp);
- break;
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ else
rtl8168ep_driver_stop(tp);
- break;
- default:
- BUG();
- break;
- }
}
static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return !!(r8168dp_ocp_read(tp, reg) & 0x00008000);
+ return r8168dp_ocp_read(tp, reg) & BIT(15);
}
static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return r8168ep_ocp_read(tp, 0x128) & 0x00000001;
+ return r8168ep_ocp_read(tp, 0x128) & BIT(0);
}
-static bool r8168_check_dash(struct rtl8169_private *tp)
+static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
- return r8168dp_check_dash(tp);
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
- return r8168ep_check_dash(tp);
+ return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
+ return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
default:
- return false;
+ return RTL_DASH_NONE;
+ }
+}
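The helper above turns the old boolean DASH check into a tri-state result, so later call sites (rtl8168_driver_start()/_stop(), rtl_prepare_power_down(), rtl_remove_one(), rtl_init_one()) can branch on a cached type instead of re-deriving it from mac_version. A minimal sketch of the enum and field this relies on; they are introduced elsewhere in the patch and are not visible in these hunks:

enum rtl_dash_type {
	RTL_DASH_NONE,
	RTL_DASH_DP,	/* RTL8168dp-style DASH interface */
	RTL_DASH_EP,	/* RTL8168ep/RTL8117-style DASH interface */
};

/*
 * struct rtl8169_private additionally gains an "enum rtl_dash_type dash_type"
 * member, filled once in rtl_init_one() via rtl_check_dash().
 */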
+
+static void rtl_set_d3_pll_down(struct rtl8169_private *tp, bool enable)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_63:
+ if (enable)
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~D3_NO_PLL_DOWN);
+ else
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | D3_NO_PLL_DOWN);
+ break;
+ default:
+ break;
}
}
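The polarity of this helper is easy to misread: enable == true clears D3_NO_PLL_DOWN and therefore allows the PLL to be powered down in D3. The call added in __rtl8169_set_wol() below ties this to Wake-on-LAN:

/* From __rtl8169_set_wol() in this patch:
 *
 *	rtl_set_d3_pll_down(tp, !wolopts);
 *
 * With any WoL option active, D3_NO_PLL_DOWN stays set so the PLL keeps
 * running in D3 and wake packets can still be detected; with WoL disabled,
 * the PLL may be powered down to save energy (the probe path defaults to
 * allowing PLL-down via rtl_set_d3_pll_down(tp, true)).
 */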
@@ -1396,6 +1431,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
rtl_lock_config_regs(tp);
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
+ rtl_set_d3_pll_down(tp, !wolopts);
tp->dev->wol_enabled = wolopts ? 1 : 0;
}
@@ -1930,6 +1966,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x608, RTL_GIGA_MAC_VER_61 },
/* RTL8117 */
+ { 0x7cf, 0x54b, RTL_GIGA_MAC_VER_53 },
{ 0x7cf, 0x54a, RTL_GIGA_MAC_VER_52 },
/* 8168EP family. */
@@ -1962,7 +1999,11 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 },
/* 8168DP family. */
- { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+	/* It seems this early RTL8168dp version never made it into
+	 * the wild. Let's see whether somebody complains; if not,
+	 * we'll remove support for this chip version completely.
+ * { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
+ */
{ 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
{ 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 },
@@ -1996,9 +2037,12 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
{ 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 },
{ 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 },
{ 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 },
- /* FIXME: where did these entries come from ? -- FR */
- { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
- { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
+	/* FIXME: where did these entries come from? -- FR
+	 * Not even the r8101 vendor driver knows these IDs,
+	 * so let's disable detection for now. -- HK
+ * { 0xfc8, 0x388, RTL_GIGA_MAC_VER_13 },
+ * { 0xfc8, 0x308, RTL_GIGA_MAC_VER_13 },
+ */
/* 8110 family. */
{ 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 },
@@ -2081,18 +2125,12 @@ static void rtl8125b_config_eee_mac(struct rtl8169_private *tp)
r8168_mac_ocp_modify(tp, 0xe040, 0, BIT(1) | BIT(0));
}
-static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_exgmac_set(struct rtl8169_private *tp, const u8 *addr)
{
- const u16 w[] = {
- addr[0] | (addr[1] << 8),
- addr[2] | (addr[3] << 8),
- addr[4] | (addr[5] << 8)
- };
-
- rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, w[0] | (w[1] << 16));
- rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, w[2]);
- rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, w[0] << 16);
- rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, w[1] | (w[2] << 16));
+ rtl_eri_write(tp, 0xe0, ERIAR_MASK_1111, get_unaligned_le32(addr));
+ rtl_eri_write(tp, 0xe4, ERIAR_MASK_1111, get_unaligned_le16(addr + 4));
+ rtl_eri_write(tp, 0xf0, ERIAR_MASK_1111, get_unaligned_le16(addr) << 16);
+ rtl_eri_write(tp, 0xf4, ERIAR_MASK_1111, get_unaligned_le32(addr + 2));
}
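This helper, together with rtl_rar_set() and rtl_read_mac_address() further below, switches from open-coded byte shifting to the get_unaligned_le*()/put_unaligned_le*() accessors. A small stand-alone sketch of the equivalence the new code relies on; the helper names are illustrative, the accessors come from <asm/unaligned.h>:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Both helpers return the same value for any 6-byte MAC address. */
static u32 mac_low32_open_coded(const u8 *addr)
{
	return addr[0] | (addr[1] << 8) | (addr[2] << 16) | ((u32)addr[3] << 24);
}

static u32 mac_low32_accessor(const u8 *addr)
{
	/* little-endian load that is also safe on unaligned pointers */
	return get_unaligned_le32(addr);
}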
u16 rtl8168h_2_get_adc_bias_ioffset(struct rtl8169_private *tp)
@@ -2142,14 +2180,14 @@ static void rtl8169_init_phy(struct rtl8169_private *tp)
genphy_soft_reset(tp->phydev);
}
-static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
+static void rtl_rar_set(struct rtl8169_private *tp, const u8 *addr)
{
rtl_unlock_config_regs(tp);
- RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
+ RTL_W32(tp, MAC4, get_unaligned_le16(addr + 4));
rtl_pci_commit(tp);
- RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
+ RTL_W32(tp, MAC0, get_unaligned_le32(addr));
rtl_pci_commit(tp);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
@@ -2172,28 +2210,16 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
+static void rtl_wol_enable_rx(struct rtl8169_private *tp)
{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25:
- case RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_29:
- case RTL_GIGA_MAC_VER_30:
- case RTL_GIGA_MAC_VER_32:
- case RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_63:
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
- break;
- default:
- break;
- }
}
-static void rtl_pll_power_down(struct rtl8169_private *tp)
+static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
@@ -2202,64 +2228,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
if (device_may_wakeup(tp_to_dev(tp))) {
phy_speed_down(tp->phydev, false);
- rtl_wol_suspend_quirk(tp);
- return;
- }
-
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
- break;
- default:
- break;
- }
-}
-
-static void rtl_pll_power_up(struct rtl8169_private *tp)
-{
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
- case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_39:
- case RTL_GIGA_MAC_VER_43:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
- break;
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- break;
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_49:
- RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
- rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
- break;
- default:
- break;
+ rtl_wol_enable_rx(tp);
}
-
- phy_resume(tp->phydev);
}
static void rtl_init_rxcfg(struct rtl8169_private *tp)
@@ -2274,7 +2244,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_38:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63:
@@ -2340,13 +2310,14 @@ static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
static void rtl_jumbo_config(struct rtl8169_private *tp)
{
bool jumbo = tp->dev->mtu > ETH_DATA_LEN;
+ int readrq = 4096;
rtl_unlock_config_regs(tp);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168b_1_hw_jumbo_enable(tp);
} else {
r8168b_1_hw_jumbo_disable(tp);
@@ -2354,7 +2325,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_18 ... RTL_GIGA_MAC_VER_26:
if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ readrq = 512;
r8168c_hw_jumbo_enable(tp);
} else {
r8168c_hw_jumbo_disable(tp);
@@ -2367,20 +2338,18 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
r8168dp_hw_jumbo_disable(tp);
break;
case RTL_GIGA_MAC_VER_31 ... RTL_GIGA_MAC_VER_33:
- if (jumbo) {
- pcie_set_readrq(tp->pci_dev, 512);
+ if (jumbo)
r8168e_hw_jumbo_enable(tp);
- } else {
+ else
r8168e_hw_jumbo_disable(tp);
- }
break;
default:
break;
}
rtl_lock_config_regs(tp);
- if (!jumbo && pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
- pcie_set_readrq(tp->pci_dev, 4096);
+ if (pci_is_pcie(tp->pci_dev) && tp->supports_gmii)
+ pcie_set_readrq(tp->pci_dev, readrq);
}
DECLARE_RTL_COND(rtl_chipcmd_cond)
@@ -2449,7 +2418,7 @@ DECLARE_RTL_COND(rtl_rxtx_empty_cond_2)
static void rtl_wait_txrx_fifo_empty(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53:
rtl_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42);
rtl_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42);
break;
@@ -3708,6 +3677,7 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_50] = rtl_hw_start_8168ep_2,
[RTL_GIGA_MAC_VER_51] = rtl_hw_start_8168ep_3,
[RTL_GIGA_MAC_VER_52] = rtl_hw_start_8117,
+ [RTL_GIGA_MAC_VER_53] = rtl_hw_start_8117,
[RTL_GIGA_MAC_VER_60] = rtl_hw_start_8125a_1,
[RTL_GIGA_MAC_VER_61] = rtl_hw_start_8125a_2,
[RTL_GIGA_MAC_VER_63] = rtl_hw_start_8125b,
@@ -4467,10 +4437,9 @@ static inline int rtl8169_fragmented_frame(u32 status)
static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
{
- u32 status = opts1 & RxProtoMask;
+ u32 status = opts1 & (RxProtoMask | RxCSFailMask);
- if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
- ((status == RxProtoUDP) && !(opts1 & UDPFail)))
+ if (status == RxProtoTCP || status == RxProtoUDP)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
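The reworked checksum test relies on a masking trick: because RxCSFailMask is folded into status, the result can only compare equal to the bare RxProtoTCP/RxProtoUDP values when the corresponding fail bit is clear, so the separate TCPFail/UDPFail checks become redundant. A self-contained illustration with made-up bit positions (not the driver's real definitions):

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative bit layout only -- not the driver's real values. */
#define RX_PROTO_MASK	(0x3u << 17)
#define RX_PROTO_TCP	(0x1u << 17)
#define RX_PROTO_UDP	(0x2u << 17)
#define RX_TCP_FAIL	BIT(14)
#define RX_UDP_FAIL	BIT(15)
#define RX_CSFAIL_MASK	(RX_TCP_FAIL | RX_UDP_FAIL)

static bool rx_csum_good(u32 opts1)
{
	u32 status = opts1 & (RX_PROTO_MASK | RX_CSFAIL_MASK);

	/* a set fail bit makes status differ from the bare protocol value */
	return status == RX_PROTO_TCP || status == RX_PROTO_UDP;
}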
@@ -4584,8 +4553,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
- rtl_irq_disable(tp);
- napi_schedule(&tp->napi);
+ if (napi_schedule_prep(&tp->napi)) {
+ rtl_irq_disable(tp);
+ __napi_schedule(&tp->napi);
+ }
out:
rtl_ack_events(tp, status);
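The interrupt-handler change above is the canonical napi_schedule_prep()/__napi_schedule() pattern: the prep call atomically claims the poll, so device interrupts are only masked when a poll is actually going to run and re-enable them. A sketch of the pattern with hypothetical names, not the driver's real functions:

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		/* we own the poll now: mask IRQs, the poll routine unmasks them */
		example_irq_disable(priv);
		__napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}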
@@ -4617,10 +4588,10 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
struct net_device *dev = tp->dev;
int work_done;
- work_done = rtl_rx(dev, tp, budget);
-
rtl_tx(dev, tp, budget);
+ work_done = rtl_rx(dev, tp, budget);
+
if (work_done < budget && napi_complete_done(napi, work_done))
rtl_irq_enable(tp);
@@ -4677,12 +4648,12 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl8169_cleanup(tp, true);
- rtl_pll_power_down(tp);
+ rtl_prepare_power_down(tp);
}
static void rtl8169_up(struct rtl8169_private *tp)
{
- rtl_pll_power_up(tp);
+ phy_resume(tp->phydev);
rtl8169_init_phy(tp);
napi_enable(&tp->napi);
set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
@@ -4837,9 +4808,12 @@ static void rtl8169_net_suspend(struct rtl8169_private *tp)
#ifdef CONFIG_PM
-static int rtl8169_net_resume(struct rtl8169_private *tp)
+static int rtl8169_runtime_resume(struct device *dev)
{
+ struct rtl8169_private *tp = dev_get_drvdata(dev);
+
rtl_rar_set(tp, tp->dev->dev_addr);
+ __rtl8169_set_wol(tp, tp->saved_wolopts);
if (tp->TxDescArray)
rtl8169_up(tp);
@@ -4873,7 +4847,7 @@ static int __maybe_unused rtl8169_resume(struct device *device)
if (tp->mac_version == RTL_GIGA_MAC_VER_37)
rtl_init_rxcfg(tp);
- return rtl8169_net_resume(tp);
+ return rtl8169_runtime_resume(device);
}
static int rtl8169_runtime_suspend(struct device *device)
@@ -4893,15 +4867,6 @@ static int rtl8169_runtime_suspend(struct device *device)
return 0;
}
-static int rtl8169_runtime_resume(struct device *device)
-{
- struct rtl8169_private *tp = dev_get_drvdata(device);
-
- __rtl8169_set_wol(tp, tp->saved_wolopts);
-
- return rtl8169_net_resume(tp);
-}
-
static int rtl8169_runtime_idle(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
@@ -4949,12 +4914,10 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl_rar_set(tp, tp->dev->perm_addr);
if (system_state == SYSTEM_POWER_OFF) {
- if (tp->saved_wolopts) {
- rtl_wol_suspend_quirk(tp);
+ if (tp->saved_wolopts)
rtl_wol_shutdown_quirk(tp);
- }
- pci_wake_from_d3(pdev, true);
+ pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
}
@@ -4968,7 +4931,7 @@ static void rtl_remove_one(struct pci_dev *pdev)
unregister_netdev(tp->dev);
- if (r8168_check_dash(tp))
+ if (tp->dash_type != RTL_DASH_NONE)
rtl8168_driver_stop(tp);
rtl_release_firmware(tp);
@@ -5036,16 +4999,12 @@ static void rtl_read_mac_address(struct rtl8169_private *tp,
{
/* Get MAC address */
if (rtl_is_8168evl_up(tp) && tp->mac_version != RTL_GIGA_MAC_VER_34) {
- u32 value = rtl_eri_read(tp, 0xe0);
-
- mac_addr[0] = (value >> 0) & 0xff;
- mac_addr[1] = (value >> 8) & 0xff;
- mac_addr[2] = (value >> 16) & 0xff;
- mac_addr[3] = (value >> 24) & 0xff;
+ u32 value;
+ value = rtl_eri_read(tp, 0xe0);
+ put_unaligned_le32(value, mac_addr);
value = rtl_eri_read(tp, 0xe4);
- mac_addr[4] = (value >> 0) & 0xff;
- mac_addr[5] = (value >> 8) & 0xff;
+ put_unaligned_le16(value, mac_addr + 4);
} else if (rtl_is_8125(tp)) {
rtl_read_mac_from_reg(tp, mac_addr, MAC0_BKP);
}
@@ -5097,7 +5056,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
new_bus->name = "r8169";
new_bus->priv = tp;
new_bus->parent = &pdev->dev;
- new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
+ new_bus->irq[0] = PHY_MAC_INTERRUPT;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
new_bus->read = r8169_mdio_read_reg;
@@ -5160,7 +5119,7 @@ static void rtl_hw_init_8125(struct rtl8169_private *tp)
static void rtl_hw_initialize(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_52:
+ case RTL_GIGA_MAC_VER_49 ... RTL_GIGA_MAC_VER_53:
rtl8168ep_stop_cmac(tp);
fallthrough;
case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_48:
@@ -5326,12 +5285,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
chipset = rtl8169_get_mac_version(xid, tp->supports_gmii);
if (chipset == RTL_GIGA_MAC_NONE) {
- dev_err(&pdev->dev, "unknown chip XID %03x\n", xid);
+ dev_err(&pdev->dev, "unknown chip XID %03x, contact r8169 maintainers (see MAINTAINERS file)\n", xid);
return -ENODEV;
}
tp->mac_version = chipset;
+ tp->dash_type = rtl_check_dash(tp);
+
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
if (sizeof(dma_addr_t) > 4 && tp->mac_version >= RTL_GIGA_MAC_VER_18 &&
@@ -5401,6 +5362,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
+ rtl_set_d3_pll_down(tp, true);
+
jumbo_max = rtl_jumbo_max(tp);
if (jumbo_max)
dev->max_mtu = jumbo_max;
@@ -5421,9 +5384,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* chip gets powered up in rtl_open() */
- rtl_pll_power_down(tp);
-
rc = register_netdev(dev);
if (rc)
return rc;
@@ -5437,7 +5397,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
jumbo_max, tp->mac_version <= RTL_GIGA_MAC_VER_06 ?
"ok" : "ko");
- if (r8168_check_dash(tp)) {
+ if (tp->dash_type != RTL_DASH_NONE) {
netdev_info(dev, "DASH enabled\n");
rtl8168_driver_start(tp);
}
@@ -5454,9 +5414,7 @@ static struct pci_driver rtl8169_pci_driver = {
.probe = rtl_init_one,
.remove = rtl_remove_one,
.shutdown = rtl_shutdown,
-#ifdef CONFIG_PM
- .driver.pm = &rtl8169_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&rtl8169_pm_ops),
};
module_pci_driver(rtl8169_pci_driver);
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 913d030d73eb..50f0f621b1aa 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -1358,6 +1358,7 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_50] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_51] = rtl8168ep_2_hw_phy_config,
[RTL_GIGA_MAC_VER_52] = rtl8117_hw_phy_config,
+ [RTL_GIGA_MAC_VER_53] = rtl8117_hw_phy_config,
[RTL_GIGA_MAC_VER_60] = rtl8125a_1_hw_phy_config,
[RTL_GIGA_MAC_VER_61] = rtl8125a_2_hw_phy_config,
[RTL_GIGA_MAC_VER_63] = rtl8125b_hw_phy_config,
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 7453b17a37a2..cb47e68c1a3e 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -165,7 +165,7 @@ enum ravb_reg {
GTO2 = 0x03A8,
GIC = 0x03AC,
GIS = 0x03B0,
- GCPT = 0x03B4, /* Undocumented? */
+ GCPT = 0x03B4, /* Documented for R-Car Gen3 only */
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
@@ -225,7 +225,7 @@ enum CSR_BIT {
CSR_OPS_RESET = 0x00000001,
CSR_OPS_CONFIG = 0x00000002,
CSR_OPS_OPERATION = 0x00000004,
- CSR_OPS_STANDBY = 0x00000008, /* Undocumented? */
+ CSR_OPS_STANDBY = 0x00000008, /* Documented for R-Car Gen3 only */
CSR_DTS = 0x00000100,
CSR_TPO0 = 0x00010000,
CSR_TPO1 = 0x00020000,
@@ -241,13 +241,12 @@ enum ESR_BIT {
ESR_EIL = 0x00001000,
};
-/* APSR */
+/* APSR (R-Car Gen3 only) */
enum APSR_BIT {
- APSR_MEMS = 0x00000002,
- APSR_CMSW = 0x00000010,
- APSR_DM = 0x00006000, /* Undocumented? */
- APSR_DM_RDM = 0x00002000,
- APSR_DM_TDM = 0x00004000,
+ APSR_MEMS = 0x00000002, /* Undocumented */
+ APSR_CMSW = 0x00000010,
+ APSR_RDM = 0x00002000,
+ APSR_TDM = 0x00004000,
};
/* RCR */
@@ -530,16 +529,16 @@ enum RIS2_BIT {
/* TIC */
enum TIC_BIT {
- TIC_FTE0 = 0x00000001, /* Undocumented? */
- TIC_FTE1 = 0x00000002, /* Undocumented? */
+ TIC_FTE0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIC_FTE1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIC_TFUE = 0x00000100,
TIC_TFWE = 0x00000200,
};
/* TIS */
enum TIS_BIT {
- TIS_FTF0 = 0x00000001, /* Undocumented? */
- TIS_FTF1 = 0x00000002, /* Undocumented? */
+ TIS_FTF0 = 0x00000001, /* Documented for R-Car Gen3 only */
+ TIS_FTF1 = 0x00000002, /* Documented for R-Car Gen3 only */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
@@ -547,8 +546,8 @@ enum TIS_BIT {
/* ISS */
enum ISS_BIT {
- ISS_FRS = 0x00000001, /* Undocumented? */
- ISS_FTS = 0x00000004, /* Undocumented? */
+ ISS_FRS = 0x00000001, /* Documented for R-Car Gen3 only */
+ ISS_FTS = 0x00000004, /* Documented for R-Car Gen3 only */
ISS_ES = 0x00000040,
ISS_MS = 0x00000080,
ISS_TFUS = 0x00000100,
@@ -608,13 +607,13 @@ enum GTI_BIT {
/* GIC */
enum GIC_BIT {
- GIC_PTCE = 0x00000001, /* Undocumented? */
+ GIC_PTCE = 0x00000001, /* Documented for R-Car Gen3 only */
GIC_PTME = 0x00000004,
};
/* GIS */
enum GIS_BIT {
- GIS_PTCF = 0x00000001, /* Undocumented? */
+ GIS_PTCF = 0x00000001, /* Documented for R-Car Gen3 only */
GIS_PTMF = 0x00000004,
GIS_RESERVED = GENMASK(15, 10),
};
@@ -808,10 +807,10 @@ enum ECMR_BIT {
ECMR_TE = 0x00000020,
ECMR_RE = 0x00000040,
ECMR_MPDE = 0x00000200,
- ECMR_TXF = 0x00010000, /* Undocumented? */
+ ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Undocumented? */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
@@ -830,7 +829,7 @@ enum ECSR_BIT {
enum ECSIPR_BIT {
ECSIPR_ICDIP = 0x00000001,
ECSIPR_MPDIP = 0x00000002,
- ECSIPR_LCHNGIP = 0x00000004, /* Undocumented? */
+ ECSIPR_LCHNGIP = 0x00000004,
};
/* PIR */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index bd30505fbc57..eb0c03bdb12d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2034,10 +2034,10 @@ static void ravb_set_delay_mode(struct net_device *ndev)
u32 set = 0;
if (priv->rxcidm)
- set |= APSR_DM_RDM;
+ set |= APSR_RDM;
if (priv->txcidm)
- set |= APSR_DM_TDM;
- ravb_modify(ndev, APSR, APSR_DM, set);
+ set |= APSR_TDM;
+ ravb_modify(ndev, APSR, APSR_RDM | APSR_TDM, set);
}
static int ravb_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h
index 6fad25321dc5..315a6e5c0f59 100644
--- a/drivers/net/ethernet/rocker/rocker.h
+++ b/drivers/net/ethernet/rocker/rocker.h
@@ -103,15 +103,13 @@ struct rocker_world_ops {
int (*port_attr_stp_state_set)(struct rocker_port *rocker_port,
u8 state);
int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans);
+ unsigned long brport_flags);
int (*port_attr_bridge_flags_support_get)(const struct rocker_port *
rocker_port,
unsigned long *
p_brport_flags);
int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans);
+ u32 ageing_time);
int (*port_obj_vlan_add)(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan);
int (*port_obj_vlan_del)(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index dd0bc7f0aaee..3473d296b2e2 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1550,17 +1550,13 @@ static void rocker_world_port_stop(struct rocker_port *rocker_port)
}
static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port,
- u8 state,
- struct switchdev_trans *trans)
+ u8 state)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_stp_state_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_attr_stp_state_set(rocker_port, state);
}
@@ -1580,8 +1576,7 @@ rocker_world_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
unsigned long brport_flags_s;
@@ -1595,7 +1590,7 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
if (err)
return err;
- if (brport_flags & ~brport_flags_s)
+ if (flags.mask & ~brport_flags_s)
return -EINVAL;
return 0;
@@ -1603,52 +1598,37 @@ rocker_world_port_attr_pre_bridge_flags_set(struct rocker_port *rocker_port,
static int
rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ struct switchdev_brport_flags flags)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_flags_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_flags_set(rocker_port, brport_flags,
- trans);
+ return wops->port_attr_bridge_flags_set(rocker_port, flags.val);
}
static int
rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
-
+ u32 ageing_time)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_attr_bridge_ageing_time_set)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time,
- trans);
+ return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time);
}
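All of the rocker/ofdpa signature changes in this file follow the switchdev core dropping its two-phase (prepare/commit) transactions: each attr/obj handler is now invoked exactly once and applies the change directly, so the switchdev_trans argument and the switchdev_trans_ph_prepare() checks disappear. A minimal sketch of the new handler shape, with hypothetical names:

/* Illustrative only; rocker's real handlers are in the surrounding hunks. */
static int example_port_attr_stp_state_set(struct example_port *port, u8 state)
{
	/* no prepare phase any more: validate and program the hardware here */
	if (!example_stp_state_valid(state))
		return -EINVAL;

	return example_hw_set_stp_state(port, state);
}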
static int
rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
struct rocker_world_ops *wops = rocker_port->rocker->wops;
if (!wops->port_obj_vlan_add)
return -EOPNOTSUPP;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
return wops->port_obj_vlan_add(rocker_port, vlan);
}
@@ -2066,8 +2046,7 @@ static const struct net_device_ops rocker_port_netdev_ops = {
********************/
static int rocker_port_attr_set(struct net_device *dev,
- const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ const struct switchdev_attr *attr)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2075,23 +2054,19 @@ static int rocker_port_attr_set(struct net_device *dev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
err = rocker_world_port_attr_stp_state_set(rocker_port,
- attr->u.stp_state,
- trans);
+ attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
err = rocker_world_port_attr_pre_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
err = rocker_world_port_attr_bridge_flags_set(rocker_port,
- attr->u.brport_flags,
- trans);
+ attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port,
- attr->u.ageing_time,
- trans);
+ attr->u.ageing_time);
break;
default:
err = -EOPNOTSUPP;
@@ -2102,8 +2077,7 @@ static int rocker_port_attr_set(struct net_device *dev,
}
static int rocker_port_obj_add(struct net_device *dev,
- const struct switchdev_obj *obj,
- struct switchdev_trans *trans)
+ const struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
@@ -2111,8 +2085,7 @@ static int rocker_port_obj_add(struct net_device *dev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
err = rocker_world_port_obj_vlan_add(rocker_port,
- SWITCHDEV_OBJ_PORT_VLAN(obj),
- trans);
+ SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
default:
err = -EOPNOTSUPP;
@@ -2725,8 +2698,7 @@ rocker_switchdev_port_attr_set_event(struct net_device *netdev,
{
int err;
- err = rocker_port_attr_set(netdev, port_attr_info->attr,
- port_attr_info->trans);
+ err = rocker_port_attr_set(netdev, port_attr_info->attr);
port_attr_info->handled = true;
return notifier_from_errno(err);
@@ -2847,8 +2819,7 @@ rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev,
switch (event) {
case SWITCHDEV_PORT_OBJ_ADD:
- err = rocker_port_obj_add(netdev, port_obj_info->obj,
- port_obj_info->trans);
+ err = rocker_port_obj_add(netdev, port_obj_info->obj);
break;
case SWITCHDEV_PORT_OBJ_DEL:
err = rocker_port_obj_del(netdev, port_obj_info->obj);
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 7072b249c8bd..967a634ee9ac 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -923,7 +923,7 @@ static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port,
struct ofdpa_flow_tbl_entry *entry;
u32 priority;
bool vlan_bridging = !!vlan_id;
- bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
+ bool dflt = !eth_dst || eth_dst_mask;
bool wild = false;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
@@ -2488,8 +2488,7 @@ static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port,
}
static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
- unsigned long brport_flags,
- struct switchdev_trans *trans)
+ unsigned long brport_flags)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
unsigned long orig_flags;
@@ -2497,14 +2496,11 @@ static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port,
orig_flags = ofdpa_port->brport_flags;
ofdpa_port->brport_flags = brport_flags;
- if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING &&
- !switchdev_trans_ph_prepare(trans))
+
+ if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING)
err = rocker_port_set_learning(ofdpa_port->rocker_port,
!!(ofdpa_port->brport_flags & BR_LEARNING));
- if (switchdev_trans_ph_prepare(trans))
- ofdpa_port->brport_flags = orig_flags;
-
return err;
}
@@ -2520,18 +2516,15 @@ ofdpa_port_attr_bridge_flags_support_get(const struct rocker_port *
static int
ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port,
- u32 ageing_time,
- struct switchdev_trans *trans)
+ u32 ageing_time)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
struct ofdpa *ofdpa = ofdpa_port->ofdpa;
- if (!switchdev_trans_ph_prepare(trans)) {
- ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
- if (ofdpa_port->ageing_time < ofdpa->ageing_time)
- ofdpa->ageing_time = ofdpa_port->ageing_time;
- mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
- }
+ ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time);
+ if (ofdpa_port->ageing_time < ofdpa->ageing_time)
+ ofdpa->ageing_time = ofdpa_port->ageing_time;
+ mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies);
return 0;
}
@@ -2540,32 +2533,16 @@ static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_add(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_add(ofdpa_port, vlan->vid, vlan->flags);
}
static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port,
const struct switchdev_obj_port_vlan *vlan)
{
struct ofdpa_port *ofdpa_port = rocker_port->wpriv;
- u16 vid;
- int err;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags);
- if (err)
- return err;
- }
- return 0;
+ return ofdpa_port_vlan_del(ofdpa_port, vlan->vid, vlan->flags);
}
static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port,
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index b1e7f7ab281c..fceb6d637235 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -203,8 +203,8 @@ int sxgbe_mdio_register(struct net_device *ndev)
case PHY_POLL:
irq_str = "POLL";
break;
- case PHY_IGNORE_INTERRUPT:
- irq_str = "IGNORE";
+ case PHY_MAC_INTERRUPT:
+ irq_str = "MAC";
break;
default:
sprintf(irq_num, "%d", phy->irq);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 718308076341..36c8625a6fd7 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -612,8 +612,6 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
- .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
- .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_xdp_xmit = efx_xdp_xmit,
.ndo_bpf = efx_xdp
};
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a4a626e9cd9a..1bfeee283ea9 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -17,6 +17,7 @@
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
+#include "workarounds.h"
/* This is the first interrupt mode to try out of:
* 0 => MSI-X
@@ -137,6 +138,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
{
unsigned int n_channels = parallelism;
int vec_count;
+ int tx_per_ev;
int n_xdp_tx;
int n_xdp_ev;
@@ -149,9 +151,9 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* multiple tx queues, assuming tx and ev queues are both
* maximum size.
*/
-
+ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
vec_count = pci_msix_vec_count(efx->pci_dev);
if (vec_count < 0)
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index aaa112877561..89c5c75f479f 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -293,14 +293,10 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
efx->rx_prefix_size);
- xdp.data = *ehp;
- xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;
-
+ xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
/* No support yet for XDP metadata */
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + rx_buf->len;
- xdp.rxq = &rx_queue->xdp_rxq_info;
- xdp.frame_sz = efx->rx_page_buf_step;
+ xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+ rx_buf->len, false);
xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
rcu_read_unlock();
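This sfc change (and the netsec change further below) replaces open-coded xdp_buff field assignments with the common xdp_init_buff()/xdp_prepare_buff() helpers. A stand-alone sketch of the pattern; the wrapper name and parameters are illustrative:

#include <linux/filter.h>
#include <net/xdp.h>

static u32 run_xdp_on_frame(struct bpf_prog *prog, struct xdp_rxq_info *rxq,
			    unsigned char *hard_start, int headroom,
			    int pkt_len, u32 frame_sz)
{
	struct xdp_buff xdp;

	/* per-queue / frame-size state */
	xdp_init_buff(&xdp, frame_sz, rxq);
	/* per-packet state: data starts at hard_start + headroom, no metadata */
	xdp_prepare_buff(&xdp, hard_start, headroom, pkt_len, false);

	return bpf_prog_run_xdp(prog, &xdp);
}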
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 742a1f7a838c..891b49281bc6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2191,7 +2191,7 @@ static const struct of_device_id smc91x_match[] = {
MODULE_DEVICE_TABLE(of, smc91x_match);
/**
- * of_try_set_control_gpio - configure a gpio if it exists
+ * try_toggle_control_gpio - configure a gpio if it exists
* @dev: net device
* @desc: where to store the GPIO descriptor, if it exists
* @name: name of the GPIO in DT
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 823d9a7184fe..606c79de93a6 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -557,6 +557,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -582,6 +583,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -594,6 +596,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
unsigned int addr;
int i, reg;
+ pm_runtime_get_sync(bus->parent);
spin_lock_irqsave(&pdata->mac_lock, flags);
/* Confirm MII not busy */
@@ -623,6 +626,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
out:
spin_unlock_irqrestore(&pdata->mac_lock, flags);
+ pm_runtime_put(bus->parent);
return reg;
}
@@ -1589,6 +1593,8 @@ static int smsc911x_open(struct net_device *dev)
int retval;
int irq_flags;
+ pm_runtime_get_sync(dev->dev.parent);
+
/* find and start the given phy */
if (!dev->phydev) {
retval = smsc911x_mii_probe(dev);
@@ -1735,6 +1741,7 @@ mii_free_out:
phy_disconnect(dev->phydev);
dev->phydev = NULL;
out:
+ pm_runtime_put(dev->dev.parent);
return retval;
}
@@ -1766,6 +1773,7 @@ static int smsc911x_stop(struct net_device *dev)
dev->phydev = NULL;
}
netif_carrier_off(dev);
+ pm_runtime_put(dev->dev.parent);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -2334,7 +2342,6 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
free_netdev(dev);
- pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -2540,6 +2547,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
}
spin_unlock_irq(&pdata->mac_lock);
+ pm_runtime_put(&pdev->dev);
netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
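The smsc911x changes balance runtime-PM references around every register access path: MDIO read/write and ndo_open take a reference, ndo_stop and the end of probe drop one, and the extra put in remove goes away. Assuming probe still takes its initial reference during setup (not visible in these hunks), the resulting pairing is roughly:

/* Illustrative pairing, not literal driver code:
 *
 *	probe:      pm_runtime_enable(); pm_runtime_get_sync();
 *	            ... register access ... pm_runtime_put();   (added here)
 *	ndo_open:   pm_runtime_get_sync(dev->dev.parent);
 *	ndo_stop:   pm_runtime_put(dev->dev.parent);
 *	mdio r/w:   pm_runtime_get_sync(bus->parent); ... pm_runtime_put(bus->parent);
 *	remove:     pm_runtime_disable();   (no longer drops an extra reference)
 */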
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 19d20a6d0d44..3c53051bdacf 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -956,8 +956,7 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
u32 xdp_act = 0;
int done = 0;
- xdp.rxq = &dring->xdp_rxq;
- xdp.frame_sz = PAGE_SIZE;
+ xdp_init_buff(&xdp, PAGE_SIZE, &dring->xdp_rxq);
rcu_read_lock();
xdp_prog = READ_ONCE(priv->xdp_prog);
@@ -1016,10 +1015,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
dma_dir);
prefetch(desc->addr);
- xdp.data_hard_start = desc->addr;
- xdp.data = desc->addr + NETSEC_RXBUF_HEADROOM;
- xdp_set_data_meta_invalid(&xdp);
- xdp.data_end = xdp.data + pkt_len;
+ xdp_prepare_buff(&xdp, desc->addr, NETSEC_RXBUF_HEADROOM,
+ pkt_len, false);
if (xdp_prog) {
xdp_result = netsec_run_xdp(priv, xdp_prog, &xdp);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 53f14c5a9e02..e675ba12fde2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -219,6 +219,14 @@ config DWMAC_INTEL_PLAT
This selects the Intel platform specific glue layer support for
the stmmac device driver. This driver is used for the Intel Keem Bay
SoC.
+
+config DWMAC_VISCONTI
+ tristate "Toshiba Visconti DWMAC support"
+ default ARCH_VISCONTI
+ depends on OF && COMMON_CLK && (ARCH_VISCONTI || COMPILE_TEST)
+ help
+	  Support for the Ethernet controller on Toshiba Visconti SoCs.
+
endif
config DWMAC_INTEL
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 24e6145d4eae..366740ab9c5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
+obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
stmmac-platform-objs:= stmmac_platform.o
dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
index ba0e4d2b256a..6c19fcc76c6f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -74,8 +74,6 @@ MODULE_DEVICE_TABLE(of, intel_eth_plat_match);
static int intel_eth_plat_probe(struct platform_device *pdev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
const struct of_device_id *match;
@@ -83,7 +81,6 @@ static int intel_eth_plat_probe(struct platform_device *pdev)
unsigned long rate;
int ret;
- plat_dat = priv->plat;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 103d2448e9e0..751dfdeec41c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -236,6 +236,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
int ret;
int i;
+ plat->pdev = pdev;
plat->phy_addr = -1;
plat->clk_csr = 5;
plat->has_gmac = 0;
@@ -459,6 +460,21 @@ static struct stmmac_pci_info tgl_sgmii1g_info = {
.setup = tgl_sgmii_data,
};
+static int adls_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
+ /* SerDes power up and power down are done in BIOS for ADL */
+
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adls_sgmii1g_info = {
+ .setup = adls_sgmii_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -726,6 +742,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0_ID 0x7aac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1_ID 0x7aad
static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, QUARK_ID, &quark_info) },
@@ -741,6 +759,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0_ID, &adls_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1_ID, &adls_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index f184b00f5116..848e5c37746b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -68,10 +68,21 @@
*/
#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+#define PRG_ETH1 0x4
+
+/* Defined for adding a delay to the input RX_CLK for better timing.
+ * Each step is 200ps. These bits are used with external RGMII PHYs
+ * because the RGMII RX timing window is small. cfg_rxclk_dly can
+ * adjust the window between RX_CLK and RX_DATA and improve the stability
+ * of "rx data valid".
+ */
+#define PRG_ETH1_CFG_RXCLK_DLY GENMASK(19, 16)
+
struct meson8b_dwmac;
struct meson8b_dwmac_data {
int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
+ bool has_prg_eth1_rgmii_rx_delay;
};
struct meson8b_dwmac {
@@ -82,7 +93,7 @@ struct meson8b_dwmac {
phy_interface_t phy_mode;
struct clk *rgmii_tx_clk;
u32 tx_delay_ns;
- u32 rx_delay_ns;
+ u32 rx_delay_ps;
struct clk *timing_adj_clk;
};
@@ -268,32 +279,37 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
return 0;
}
-static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
{
- u32 tx_dly_config, rx_dly_config, delay_config;
+ u32 tx_dly_config, rx_adj_config, cfg_rxclk_dly, delay_config;
int ret;
+ rx_adj_config = 0;
+ cfg_rxclk_dly = 0;
tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
dwmac->tx_delay_ns >> 1);
- if (dwmac->rx_delay_ns == 2)
- rx_dly_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
- else
- rx_dly_config = 0;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay)
+ cfg_rxclk_dly = FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY,
+ dwmac->rx_delay_ps / 200);
+ else if (dwmac->rx_delay_ps == 2000)
+ rx_adj_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
- delay_config = tx_dly_config | rx_dly_config;
+ delay_config = tx_dly_config | rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
delay_config = tx_dly_config;
+ cfg_rxclk_dly = 0;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- delay_config = rx_dly_config;
+ delay_config = rx_adj_config;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RMII:
delay_config = 0;
+ cfg_rxclk_dly = 0;
break;
default:
dev_err(dwmac->dev, "unsupported phy-mode %s\n",
@@ -301,7 +317,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
return -EINVAL;
}
- if (rx_dly_config & PRG_ETH0_ADJ_ENABLE) {
+ if (delay_config & PRG_ETH0_ADJ_ENABLE) {
if (!dwmac->timing_adj_clk) {
dev_err(dwmac->dev,
"The timing-adjustment clock is mandatory for the RX delay re-timing\n");
@@ -323,6 +339,16 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
delay_config);
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH1, PRG_ETH1_CFG_RXCLK_DLY,
+ cfg_rxclk_dly);
+
+ return 0;
+}
+
+static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+{
+ int ret;
+
if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
/* only relevant for RMII mode -> disable in RGMII mode */
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
@@ -406,16 +432,30 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
&dwmac->tx_delay_ns))
dwmac->tx_delay_ns = 2;
- /* use 0ns as fallback since this is what most boards actually use */
- if (of_property_read_u32(pdev->dev.of_node, "amlogic,rx-delay-ns",
- &dwmac->rx_delay_ns))
- dwmac->rx_delay_ns = 0;
+ /* RX delay defaults to 0ps since this is what many boards use */
+ if (of_property_read_u32(pdev->dev.of_node, "rx-internal-delay-ps",
+ &dwmac->rx_delay_ps)) {
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ps))
+ /* convert ns to ps */
+ dwmac->rx_delay_ps *= 1000;
+ }
- if (dwmac->rx_delay_ns != 0 && dwmac->rx_delay_ns != 2) {
- dev_err(&pdev->dev,
- "The only allowed RX delays values are: 0ns, 2ns");
- ret = -EINVAL;
- goto err_remove_config_dt;
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay) {
+ if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) {
+ dev_err(dwmac->dev,
+ "The RGMII RX delay range is 0..3000ps in 200ps steps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ } else {
+ if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) {
+ dev_err(dwmac->dev,
+				"The only allowed RGMII RX delay values are: 0ps, 2000ps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
}
dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
@@ -425,6 +465,10 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ ret = meson8b_init_rgmii_delays(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
ret = meson8b_init_rgmii_tx_clk(dwmac);
if (ret)
goto err_remove_config_dt;
@@ -453,10 +497,17 @@ err_remove_config_dt:
static const struct meson8b_dwmac_data meson8b_dwmac_data = {
.set_phy_mode = meson8b_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
};
static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
.set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
+};
+
+static const struct meson8b_dwmac_data meson_g12a_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = true,
};
static const struct of_device_id meson8b_dwmac_match[] = {
@@ -478,7 +529,7 @@ static const struct of_device_id meson8b_dwmac_match[] = {
},
{
.compatible = "amlogic,meson-g12a-dwmac",
- .data = &meson_axg_dwmac_data,
+ .data = &meson_g12a_dwmac_data,
},
{ }
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index a5e0eff4a387..6b75cf2603ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -805,12 +805,12 @@ static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
	/* Make sure the EPHY is properly reset, as U-Boot may leave
	 * it in a deasserted state, and thus it may fail to reset EMAC.
+ *
+ * This assumes the driver has exclusive access to the EPHY reset.
*/
- reset_control_assert(gmac->rst_ephy);
-
- ret = reset_control_deassert(gmac->rst_ephy);
+ ret = reset_control_reset(gmac->rst_ephy);
if (ret) {
- dev_err(priv->device, "Cannot deassert internal phy\n");
+ dev_err(priv->device, "Cannot reset internal PHY\n");
clk_disable_unprepare(gmac->ephy_clk);
return ret;
}
@@ -820,15 +820,14 @@ static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
return 0;
}
-static int sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
+static void sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
{
if (!gmac->internal_phy_powered)
- return 0;
+ return;
clk_disable_unprepare(gmac->ephy_clk);
reset_control_assert(gmac->rst_ephy);
gmac->internal_phy_powered = false;
- return 0;
}
/* MDIO multiplexing switch function
@@ -1019,10 +1018,8 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
- if (gmac->variant->soc_has_internal_phy) {
- if (gmac->internal_phy_powered)
- sun8i_dwmac_unpower_internal_phy(gmac);
- }
+ if (gmac->variant->soc_has_internal_phy)
+ sun8i_dwmac_unpower_internal_phy(gmac);
clk_disable_unprepare(gmac->tx_clk);
@@ -1232,6 +1229,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ndev = dev_get_drvdata(&pdev->dev);
priv = netdev_priv(ndev);
+
/* The mux must be registered after parent MDIO
* so after stmmac_dvr_probe()
*/
@@ -1250,7 +1248,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
goto dwmac_remove;
}
- return ret;
+ return 0;
+
dwmac_mux:
reset_control_put(gmac->rst_ephy);
clk_put(gmac->ephy_clk);
@@ -1285,6 +1284,15 @@ static int sun8i_dwmac_remove(struct platform_device *pdev)
return 0;
}
+static void sun8i_dwmac_shutdown(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ sun8i_dwmac_exit(pdev, gmac);
+}
+
static const struct of_device_id sun8i_dwmac_match[] = {
{ .compatible = "allwinner,sun8i-h3-emac",
.data = &emac_variant_h3 },
@@ -1305,6 +1313,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
.remove = sun8i_dwmac_remove,
+ .shutdown = sun8i_dwmac_shutdown,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
new file mode 100644
index 000000000000..b7a0c57dfbfb
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Toshiba Visconti Ethernet Support
+ *
+ * (C) Copyright 2020 TOSHIBA CORPORATION
+ * (C) Copyright 2020 Toshiba Electronic Devices & Storage Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+#include "dwmac4.h"
+
+#define REG_ETHER_CONTROL 0x52D4
+#define ETHER_ETH_CONTROL_RESET BIT(17)
+
+#define REG_ETHER_CLOCK_SEL 0x52D0
+#define ETHER_CLK_SEL_TX_CLK_EN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EN BIT(1)
+#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
+#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
+#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
+#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
+#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
+#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
+#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
+#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
+
+#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
+
+#define ETHER_CONFIG_INTF_MII 0
+#define ETHER_CONFIG_INTF_RGMII BIT(0)
+#define ETHER_CONFIG_INTF_RMII BIT(2)
+
+struct visconti_eth {
+ void __iomem *reg;
+ u32 phy_intf_sel;
+ struct clk *phy_ref_clk;
+ spinlock_t lock; /* lock to protect register update */
+};
+
+static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct visconti_eth *dwmac = priv;
+	unsigned int val, clk_sel_val = 0;	/* avoid uninitialized use for unhandled speeds */
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwmac->lock, flags);
+
+ /* adjust link */
+ val = readl(dwmac->reg + MAC_CTRL_REG);
+ val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
+
+ switch (speed) {
+ case SPEED_1000:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
+ break;
+ case SPEED_100:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
+ clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
+ val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
+ break;
+ case SPEED_10:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
+ clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
+ val |= GMAC_CONFIG_PS;
+ break;
+ default:
+ /* No bit control */
+ break;
+ }
+
+ writel(val, dwmac->reg + MAC_CTRL_REG);
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ switch (dwmac->phy_intf_sel) {
+ case ETHER_CONFIG_INTF_RGMII:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ break;
+ case ETHER_CONFIG_INTF_RMII:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ break;
+ case ETHER_CONFIG_INTF_MII:
+ default:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_EN;
+ break;
+ }
+
+ /* Start clock */
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+}
+
+static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
+{
+ struct visconti_eth *dwmac = plat_dat->bsp_priv;
+ unsigned int reg_val, clk_sel_val;
+
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ reg_val = dwmac->phy_intf_sel;
+ writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+
+ /* Enable TX/RX clock */
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
+ writel(clk_sel_val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ writel((clk_sel_val | ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN),
+ dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* release internal-reset */
+ reg_val |= ETHER_ETH_CONTROL_RESET;
+ writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+
+ return 0;
+}
+
+static int visconti_eth_clock_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
+{
+ struct visconti_eth *dwmac = plat_dat->bsp_priv;
+ int err;
+
+ dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(dwmac->phy_ref_clk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ return PTR_ERR(dwmac->phy_ref_clk);
+ }
+
+ err = clk_prepare_enable(dwmac->phy_ref_clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int visconti_eth_clock_remove(struct platform_device *pdev)
+{
+ struct visconti_eth *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(dwmac->phy_ref_clk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+
+ return 0;
+}
+
+static int visconti_eth_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct visconti_eth *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto remove_config;
+ }
+
+ dwmac->reg = stmmac_res.addr;
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+
+ ret = visconti_eth_clock_probe(pdev, plat_dat);
+ if (ret)
+ goto remove_config;
+
+ visconti_eth_init_hw(pdev, plat_dat);
+
+ plat_dat->dma_cfg->aal = 1;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto remove;
+
+ return ret;
+
+remove:
+ visconti_eth_clock_remove(pdev);
+remove_config:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int visconti_eth_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = stmmac_pltfr_remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+ err = visconti_eth_clock_remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove clock: %d\n", err);
+
+ stmmac_remove_config_dt(pdev, priv->plat);
+
+ return err;
+}
+
+static const struct of_device_id visconti_eth_dwmac_match[] = {
+ { .compatible = "toshiba,visconti-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, visconti_eth_dwmac_match);
+
+static struct platform_driver visconti_eth_dwmac_driver = {
+ .probe = visconti_eth_dwmac_probe,
+ .remove = visconti_eth_dwmac_remove,
+ .driver = {
+ .name = "visconti-eth-dwmac",
+ .of_match_table = visconti_eth_dwmac_match,
+ },
+};
+module_platform_driver(visconti_eth_dwmac_driver);
+
+MODULE_AUTHOR("Toshiba");
+MODULE_DESCRIPTION("Toshiba Visconti Ethernet DWMAC glue driver");
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9e54f953634b..c5642985ef95 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -268,6 +268,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
+ if (priv->plat->pdev) {
+ strlcpy(info->bus_info, pci_name(priv->plat->pdev),
+ sizeof(info->bus_info));
+ }
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 8ed3b2c834a0..56985542e202 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -324,7 +324,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
} else if (!qopt->enable) {
- return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+ MTL_QUEUE_DCB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
/* Port Transmit Rate and Speed Divider */
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
index 8c4195a9a2cc..589797bad1f9 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-desc.c
@@ -634,7 +634,7 @@ err_out:
void xlgmac_init_desc_ops(struct xlgmac_desc_ops *desc_ops)
{
- desc_ops->alloc_channles_and_rings = xlgmac_alloc_channels_and_rings;
+ desc_ops->alloc_channels_and_rings = xlgmac_alloc_channels_and_rings;
desc_ops->free_channels_and_rings = xlgmac_free_channels_and_rings;
desc_ops->map_tx_skb = xlgmac_map_tx_skb;
desc_ops->map_rx_buffer = xlgmac_map_rx_buffer;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 26aa7f32151f..26d178f8616b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -654,7 +654,7 @@ static int xlgmac_open(struct net_device *netdev)
pdata->rx_buf_size = ret;
/* Allocate the channels and rings */
- ret = desc_ops->alloc_channles_and_rings(pdata);
+ ret = desc_ops->alloc_channels_and_rings(pdata);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
index cab3e40a86b9..8598aaf3ec99 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -379,7 +379,7 @@ struct xlgmac_channel {
} ____cacheline_aligned;
struct xlgmac_desc_ops {
- int (*alloc_channles_and_rings)(struct xlgmac_pdata *pdata);
+ int (*alloc_channels_and_rings)(struct xlgmac_pdata *pdata);
void (*free_channels_and_rings)(struct xlgmac_pdata *pdata);
int (*map_tx_skb)(struct xlgmac_channel *channel,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index abfc4c435d59..affcf92cd3aa 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -92,6 +92,7 @@ config TI_CPTS
config TI_K3_AM65_CPSW_NUSS
tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ select NET_DEVLINK
select TI_DAVINCI_MDIO
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
@@ -105,6 +106,15 @@ config TI_K3_AM65_CPSW_NUSS
To compile this driver as a module, choose M here: the module
will be called ti-am65-cpsw-nuss.
+config TI_K3_AM65_CPSW_SWITCHDEV
+ bool "TI K3 AM654x/J721E CPSW Switch mode support"
+ depends on TI_K3_AM65_CPSW_NUSS
+ depends on NET_SWITCHDEV
+ help
+ This enables switchdev support for the TI K3 CPSWxG Ethernet
+ Switch. Enable this option to add hardware switch support to the
+ AM65 CPSW NUSS driver.
+
config TI_K3_AM65_CPTS
tristate "TI K3 AM65x CPTS"
depends on ARCH_K3 && OF
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index 6e779292545d..75f761efbea7 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -26,4 +26,5 @@ keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.
obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 766e8866bbef..638d7b03be4b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -31,6 +31,7 @@
#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
#include "k3-cppi-desc-pool.h"
#include "am65-cpts.h"
@@ -228,6 +229,9 @@ static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
u32 port_mask, unreg_mcast = 0;
int ret;
+ if (!common->is_emac_mode)
+ return 0;
+
if (!netif_running(ndev) || !vid)
return 0;
@@ -255,6 +259,9 @@ static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
int ret;
+ if (!common->is_emac_mode)
+ return 0;
+
if (!netif_running(ndev) || !vid)
return 0;
@@ -277,6 +284,11 @@ static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
{
struct am65_cpsw_common *common = port->common;
+ if (promisc && !common->is_emac_mode) {
+ dev_dbg(common->dev, "promisc mode requested in switch mode");
+ return;
+ }
+
if (promisc) {
/* Enable promiscuous mode */
cpsw_ale_control_set(common->ale, port->port_id,
@@ -366,8 +378,9 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_err(dev, "Failed to map rx skb buffer\n");
return -EINVAL;
@@ -375,6 +388,7 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
swdata = cppi5_hdesc_get_swdata(desc_rx);
*((void **)swdata) = skb;
@@ -406,6 +420,11 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
netdev_features_t features)
{
@@ -452,9 +471,6 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
ALE_DEFAULT_THREAD_ID, 0);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
ALE_DEFAULT_THREAD_ENABLE, 1);
- if (AM65_CPSW_IS_CPSW2G(common))
- cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
- ALE_PORT_NOLEARN, 1);
/* switch to vlan unaware mode */
cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
@@ -468,6 +484,11 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
port_mask, port_mask,
port_mask & ~ALE_PORT_HOST);
+ if (common->is_emac_mode)
+ am65_cpsw_init_host_port_emac(common);
+ else
+ am65_cpsw_init_host_port_switch(common);
+
for (i = 0; i < common->rx_chns.descs_num; i++) {
skb = __netdev_alloc_skb_ip_align(NULL,
AM65_CPSW_MAX_PACKET_SIZE,
@@ -596,7 +617,6 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
- u32 port_mask;
int ret, i;
ret = pm_runtime_get_sync(common->dev);
@@ -629,19 +649,10 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
- if (port->slave.mac_only) {
- /* enable mac-only mode on port */
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_MACONLY, 1);
- cpsw_ale_control_set(common->ale, port->port_id,
- ALE_PORT_NOLEARN, 1);
- }
-
- port_mask = BIT(port->port_id) | ALE_PORT_HOST;
- cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
- HOST_PORT_NUM, ALE_SECURE, 0);
- cpsw_ale_add_mcast(common->ale, ndev->broadcast,
- port_mask, 0, 0, ALE_MCAST_FWD_2);
+ if (common->is_emac_mode)
+ am65_cpsw_init_port_emac_ale(port);
+ else
+ am65_cpsw_init_port_switch_ale(port);
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
@@ -691,8 +702,9 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
dev_kfree_skb_any(skb);
@@ -779,6 +791,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
swdata = cppi5_hdesc_get_swdata(desc_rx);
skb = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
@@ -793,18 +806,19 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
- dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
if (new_skb) {
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
am65_cpsw_nuss_rx_csum(skb, csum_info);
napi_gro_receive(&common->napi_rx, skb);
- ndev_priv = netdev_priv(ndev);
stats = this_cpu_ptr(ndev_priv->stats);
u64_stats_update_begin(&stats->syncp);
@@ -864,7 +878,6 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
- struct device *dev,
struct cppi5_host_desc_t *desc)
{
struct cppi5_host_desc_t *first_desc, *next_desc;
@@ -875,20 +888,23 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
next_desc = first_desc;
cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_single(dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
while (next_desc_dma) {
next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
next_desc_dma);
cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
- dma_unmap_page(dev, buf_dma, buf_dma_len,
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
DMA_TO_DEVICE);
next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
}
@@ -906,7 +922,7 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
dev_kfree_skb_any(skb);
}
@@ -926,7 +942,7 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
ndev = skb->dev;
@@ -1119,9 +1135,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
netif_txq = netdev_get_tx_queue(ndev, q_idx);
/* Map the linear buffer */
- buf_dma = dma_map_single(dev, skb->data, pkt_len,
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb buffer\n");
ndev->stats.tx_errors++;
goto err_free_skb;
@@ -1130,7 +1146,8 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
if (!first_desc) {
dev_dbg(dev, "Failed to allocate descriptor\n");
- dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
+ DMA_TO_DEVICE);
goto busy_stop_q;
}
@@ -1140,6 +1157,7 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
cppi5_hdesc_set_pkttype(first_desc, 0x7);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
swdata = cppi5_hdesc_get_swdata(first_desc);
*(swdata) = skb;
@@ -1175,9 +1193,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
- buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev, buf_dma))) {
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
dev_err(dev, "Failed to map tx skb page\n");
k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
ndev->stats.tx_errors++;
@@ -1185,11 +1203,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
}
cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
cppi5_hdesc_attach_buf(next_desc,
buf_dma, frag_size, buf_dma, frag_size);
desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
pkt_len += frag_size;
@@ -1237,14 +1257,14 @@ done_tx:
return NETDEV_TX_OK;
err_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
err_free_skb:
ndev->stats.tx_dropped++;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
busy_free_descs:
- am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
busy_stop_q:
netif_tx_stop_queue(netif_txq);
return NETDEV_TX_BUSY;
@@ -1441,6 +1461,13 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
+static struct devlink_port *am65_cpsw_ndo_get_devlink_port(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return &port->devlink_port;
+}
+
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
@@ -1454,6 +1481,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
.ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
+ .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
};
static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
@@ -1545,16 +1573,6 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
tx_chn->common = common;
tx_chn->id = i;
tx_chn->descs_num = max_desc_num;
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->tx_chn_name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- dev_err(dev, "Failed to create poll %d\n", ret);
- goto err;
- }
tx_chn->tx_chn =
k3_udma_glue_request_tx_chn(dev,
@@ -1565,6 +1583,17 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
"Failed to request tx dma channel\n");
goto err;
}
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+
+ tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create poll %d\n", ret);
+ goto err;
+ }
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq <= 0) {
@@ -1622,14 +1651,6 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
/* init all flows */
rx_chn->dev = dev;
rx_chn->descs_num = max_desc_num;
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
- rx_chn->descs_num,
- hdesc_size, "rx");
- if (IS_ERR(rx_chn->desc_pool)) {
- ret = PTR_ERR(rx_chn->desc_pool);
- dev_err(dev, "Failed to create rx poll %d\n", ret);
- goto err;
- }
rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
if (IS_ERR(rx_chn->rx_chn)) {
@@ -1637,6 +1658,16 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
"Failed to request rx dma channel\n");
goto err;
}
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ dev_err(dev, "Failed to create rx poll %d\n", ret);
+ goto err;
+ }
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
@@ -2018,6 +2049,441 @@ static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
}
}
+static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
+{
+ int set_val = 0;
+ int i;
+
+ if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
+ set_val = 1;
+
+ dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 1; i <= common->port_num; i++) {
+ struct am65_cpsw_port *port = am65_common_get_port(common, i);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);
+
+ priv->offload_fwd_mark = set_val;
+ }
+}
+
+bool am65_cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ return !common->is_emac_mode;
+ }
+
+ return false;
+}
+
+static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ if (!common->br_members) {
+ common->hw_bridge_dev = br_ndev;
+ } else {
+ /* Adding the same port to a second bridge is not
+ * supported
+ */
+ if (common->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ common->br_members |= BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ return NOTIFY_DONE;
+}
+
+static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ common->br_members &= ~BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ if (!common->br_members)
+ common->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int am65_cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev);
+ else
+ am65_cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return 0;
+
+ cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
+ ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+ if (ret) {
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = am65_cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+
+ return ret;
+}
+
+static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return;
+
+ am65_cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops am65_cpsw_devlink_ops = {};
+
+static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
+{
+ cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
+
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ am65_cpsw_init_stp_ale_entry(common);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
+
+ /* learning makes no sense in multi-mac mode */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
+static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *common = dl_priv->common;
+
+ dev_dbg(common->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !common->is_emac_mode;
+
+ return 0;
+}
+
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *common = port->common;
+ u32 port_mask;
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ if (slave->mac_only)
+ /* enable mac-only mode on port */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY, 1);
+
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
+ cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
+}
+
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *cpsw = port->common;
+ u32 port_mask;
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_NOLEARN, 0);
+
+ cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
+ slave->port_vlan);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_MACONLY, 0);
+}
+
+static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *cpsw = dl_priv->common;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->is_emac_mode)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ cpsw->is_emac_mode = !switch_en;
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+
+ if (!sl_ndev)
+ continue;
+
+ slave = am65_ndev_to_slave(sl_ndev);
+ if (switch_en)
+ slave->port_vlan = cpsw->default_vlan;
+ else
+ slave->port_vlan = 0;
+ }
+
+ goto exit;
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ am65_cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ slave = am65_ndev_to_slave(sl_ndev);
+ slave->port_vlan = cpsw->default_vlan;
+
+ if (netif_running(sl_ndev))
+ am65_cpsw_init_port_switch_ale(port);
+ }
+
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ am65_cpsw_init_host_port_emac(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ port->slave.port_vlan = 0;
+ if (netif_running(sl_ndev))
+ am65_cpsw_init_port_emac_ale(port);
+ }
+ }
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
+
+static const struct devlink_param am65_cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ am65_cpsw_dl_switch_mode_get,
+ am65_cpsw_dl_switch_mode_set, NULL),
+};
+
+static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
+{
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ if (dl_port->registered)
+ devlink_port_unregister(dl_port);
+ }
+}
+
+static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
+{
+ struct devlink_port_attrs attrs = {};
+ struct am65_cpsw_devlink *dl_priv;
+ struct device *dev = common->dev;
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int ret = 0;
+ int i;
+
+ common->devlink =
+ devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv));
+ if (!common->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(common->devlink);
+ dl_priv->common = common;
+
+ ret = devlink_register(common->devlink, dev);
+ if (ret) {
+ dev_err(dev, "devlink reg fail ret:%d\n", ret);
+ goto dl_free;
+ }
+
+ /* Provide a devlink hook to switch mode when multiple external ports
+ * are present and the NUSS switchdev driver is enabled.
+ */
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
+ ret = devlink_params_register(common->devlink,
+ am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
+ if (ret) {
+ dev_err(dev, "devlink params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+ devlink_params_publish(common->devlink);
+ }
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port->port_id;
+ attrs.switch_id.id_len = sizeof(resource_size_t);
+ memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
+ devlink_port_attrs_set(dl_port, &attrs);
+
+ ret = devlink_port_register(common->devlink, dl_port, port->port_id);
+ if (ret) {
+ dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
+ port->port_id, ret);
+ goto dl_port_unreg;
+ }
+ devlink_port_type_eth_set(dl_port, port->ndev);
+ }
+
+ return ret;
+
+dl_port_unreg:
+ am65_cpsw_unregister_devlink_ports(common);
+dl_unreg:
+ devlink_unregister(common->devlink);
+dl_free:
+ devlink_free(common->devlink);
+
+ return ret;
+}
+
+static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
+{
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
+ devlink_params_unpublish(common->devlink);
+ devlink_params_unregister(common->devlink, am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
+ }
+
+ am65_cpsw_unregister_devlink_ports(common);
+ devlink_unregister(common->devlink);
+ devlink_free(common->devlink);
+}
+
static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
{
struct device *dev = common->dev;
@@ -2051,14 +2517,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
}
}
+ ret = am65_cpsw_register_notifiers(common);
+ if (ret)
+ goto err_cleanup_ndev;
+
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ goto clean_unregister_notifiers;
/* can't auto unregister ndev using devm_add_action() due to
* devres release sequence in DD core for DMA
*/
- return 0;
+ return 0;
+clean_unregister_notifiers:
+ am65_cpsw_unregister_notifiers(common);
err_cleanup_ndev:
am65_cpsw_nuss_cleanup_ndev(common);
+
return ret;
}
@@ -2102,9 +2578,16 @@ static const struct am65_cpsw_pdata j721e_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
};
+static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
+ { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2131,6 +2614,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
+ u64 id_temp;
int ret, i;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
@@ -2150,6 +2634,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (IS_ERR(common->ss_base))
return PTR_ERR(common->ss_base);
common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
+ /* Use device's physical base address as switch id */
+ id_temp = cpu_to_be64(res->start);
+ memcpy(common->switch_id, &id_temp, sizeof(res->start));
node = of_get_child_by_name(dev->of_node, "ethernet-ports");
if (!node)
@@ -2163,12 +2650,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
init_completion(&common->tdown_complete);
common->tx_ch_num = 1;
common->pf_p0_rx_ptype_rrobin = false;
-
- ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (ret) {
- dev_err(dev, "error setting dma mask: %d\n", ret);
- return ret;
- }
+ common->default_vlan = 1;
common->ports = devm_kcalloc(dev, common->port_num,
sizeof(*common->ports),
@@ -2248,6 +2730,8 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
dev_set_drvdata(dev, common);
+ common->is_emac_mode = true;
+
ret = am65_cpsw_nuss_init_ndevs(common);
if (ret)
goto err_of_clear;
@@ -2281,6 +2765,9 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
return ret;
}
+ am65_cpsw_unregister_devlink(common);
+ am65_cpsw_unregister_notifiers(common);
+
/* must unregister ndevs here because DD release_driver routine calls
* dma_deconfigure(dev) before devres_release_all(dev)
*/
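
The am65-cpsw-nuss.c hunks above move every packet-buffer mapping from the platform device to the DMA device returned by the k3-udma-glue layer (k3_udma_glue_tx_get_dma_device()/k3_udma_glue_rx_get_dma_device()) and translate each address to the CPPI5 view before it is written into a descriptor, and back again before unmapping. The condensed TX-side sketch below is an illustration only, assembled from calls that already appear in the diff; the helper name tx_map_sketch() is hypothetical and the driver's real error paths and bookkeeping are omitted:

    #include <linux/dma-mapping.h>
    #include <linux/dma/k3-udma-glue.h>
    #include <linux/dma/ti-cppi5.h>
    #include <linux/skbuff.h>
    #include "am65-cpsw-nuss.h"

    /* Sketch: map a linear skb with the UDMA DMA device and attach the
     * CPPI5 view of the address to a host descriptor.
     */
    static int tx_map_sketch(struct am65_cpsw_tx_chn *tx_chn,
                             struct sk_buff *skb,
                             struct cppi5_host_desc_t *desc)
    {
            u32 len = skb_headlen(skb);
            dma_addr_t buf_dma;

            /* tx_chn->dma_dev was obtained from
             * k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn) at init time
             */
            buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, len,
                                     DMA_TO_DEVICE);
            if (dma_mapping_error(tx_chn->dma_dev, buf_dma))
                    return -ENOMEM;

            /* convert the DMA-API address into the form the UDMA hardware
             * expects to find inside CPPI5 descriptors
             */
            k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
            cppi5_hdesc_attach_buf(desc, buf_dma, len, buf_dma, len);

            return 0;
    }

On completion the xmit_free hunks perform the inverse: k3_udma_glue_tx_cppi5_to_dma_addr() first, then dma_unmap_single()/dma_unmap_page() against the same tx_chn->dma_dev.
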
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 02aed4c0ceba..5d93e346f05e 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -6,12 +6,14 @@
#ifndef AM65_CPSW_NUSS_H_
#define AM65_CPSW_NUSS_H_
+#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
+#include <net/devlink.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
@@ -22,6 +24,8 @@ struct am65_cpts;
#define AM65_CPSW_MAX_RX_QUEUES 1
#define AM65_CPSW_MAX_RX_FLOWS 1
+#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
+
struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
@@ -32,6 +36,7 @@ struct am65_cpsw_slave_data {
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
+ int port_vlan;
};
struct am65_cpsw_port {
@@ -47,6 +52,7 @@ struct am65_cpsw_port {
bool tx_ts_enabled;
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
+ struct devlink_port devlink_port;
};
struct am65_cpsw_host {
@@ -56,6 +62,7 @@ struct am65_cpsw_host {
};
struct am65_cpsw_tx_chn {
+ struct device *dma_dev;
struct napi_struct napi_tx;
struct am65_cpsw_common *common;
struct k3_cppi_desc_pool *desc_pool;
@@ -69,6 +76,7 @@ struct am65_cpsw_tx_chn {
struct am65_cpsw_rx_chn {
struct device *dev;
+ struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
u32 descs_num;
@@ -83,6 +91,15 @@ struct am65_cpsw_pdata {
const char *ale_dev_id;
};
+enum cpsw_devlink_param_id {
+ AM65_CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ AM65_CPSW_DL_PARAM_SWITCH_MODE,
+};
+
+struct am65_cpsw_devlink {
+ struct am65_cpsw_common *common;
+};
+
struct am65_cpsw_common {
struct device *dev;
struct device *mdio_dev;
@@ -115,6 +132,14 @@ struct am65_cpsw_common {
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
+
+ bool is_emac_mode;
+ u16 br_members;
+ int default_vlan;
+ struct devlink *devlink;
+ struct net_device *hw_bridge_dev;
+ struct notifier_block am65_cpsw_netdevice_nb;
+ unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
};
struct am65_cpsw_ndev_stats {
@@ -129,6 +154,7 @@ struct am65_cpsw_ndev_priv {
u32 msg_enable;
struct am65_cpsw_port *port;
struct am65_cpsw_ndev_stats __percpu *stats;
+ bool offload_fwd_mark;
};
#define am65_ndev_to_priv(ndev) \
@@ -156,4 +182,6 @@ void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+bool am65_cpsw_port_dev_check(const struct net_device *dev);
+
#endif /* AM65_CPSW_NUSS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 3bdd4dbcd2ff..ebcc6386cc34 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -356,7 +356,7 @@ static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}
-/**
+/*
* Enable ESTf periodic output, set cycle start time and interval.
*/
static int am65_cpsw_timer_set(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
new file mode 100644
index 000000000000..d93ffd8a08b0
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
+#include "cpsw_ale.h"
+
+struct am65_cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct am65_cpsw_port *port;
+ unsigned long event;
+};
+
+static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ u8 cpsw_state;
+ int ret = 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_STATE, cpsw_state);
+ netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
+ struct net_device *orig_dev,
+ struct switchdev_brport_flags flags)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
+
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+
+ netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, port->port_id);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
+ unreg_mcast_add);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_set(struct net_device *ndev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
+ netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
+ if (port->port_id)
+ pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (port->port_id)
+ writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+}
+
+static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = BIT(port->port_id);
+ flags = port->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ netdev_err(port->ndev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
+ if (!pvid)
+ return ret;
+
+ am65_cpsw_set_pvid(port, vid, 0, 0);
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+ /* We don't care about the return value here; an error is returned
+ * only if the unicast entry is not present
+ */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == am65_cpsw_get_pvid(port))
+ am65_cpsw_set_pvid(port, 0, 0, 0);
+
+ /* We don't care about the return value here; an error is returned
+ * only if the multicast entry is not present
+ */
+ cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
+ ALE_VLAN, vid);
+ netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
+ port->ndev->name, vlan->vid, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
+}
+
+static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
+}
+
+static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int err;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int del_mask;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(port->port_id);
+
+ /* Ignore error as error code is returned only when entry is already removed */
+ cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return 0;
+}
+
+static int am65_cpsw_port_obj_add(struct net_device *ndev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_add(port, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_add(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int am65_cpsw_port_obj_del(struct net_device *ndev,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_del(port, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_del(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void am65_cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct am65_cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct am65_cpsw_switchdev_event_work, work);
+ struct am65_cpsw_port *port = switchdev_work->port;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_id = port->port_id;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ am65_cpsw_fdb_offload_notify(port->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int am65_cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct am65_cpsw_switchdev_event_work *switchdev_work;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
+ switchdev_work->port = port;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = am65_cpsw_switchdev_event,
+};
+
+static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = am65_cpsw_switchdev_blocking_event,
+};
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.h b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
new file mode 100644
index 000000000000..a67a7606bc80
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+
+#include <linux/skbuff.h>
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+ skb->offload_fwd_mark = val;
+}
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw);
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw);
+#else
+static inline int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+}
+
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+}
+
+#endif
+
+#endif /* DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_ */
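
The stub half of this header exists so that am65-cpsw-nuss.c can call the switchdev hooks unconditionally: with CONFIG_TI_K3_AM65_CPSW_SWITCHDEV disabled, the registration helper collapses to an -EOPNOTSUPP return and the offload_fwd_mark setter to an empty inline the compiler drops. A short sketch of the intended call sites, mirroring the nuss-driver hunks earlier in this diff rather than introducing new API:

    /* registration path: the guard skips CPSW2G devices and builds where
     * switchdev support is not reachable, so the stub's error is never
     * surfaced to the user
     */
    if (!AM65_CPSW_IS_CPSW2G(cpsw) &&
        IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
            ret = am65_cpsw_switchdev_register_notifiers(cpsw);
            if (ret)
                    return ret;
    }

    /* RX hot path: compiles away entirely when the option is off */
    am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
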
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 5dc60ecabe56..9caaae79fc95 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -727,7 +727,7 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
/**
* am65_cpts_rx_enable - enable rx timestamping
* @cpts: cpts handle
- * @skb: packet
+ * @en: enable
*
* This functions enables rx packets timestamping. The CPTS can timestamp all
* rx packets.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b0f00b4edd94..fd966567464c 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -392,29 +392,21 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
port = priv->emac_port + cpsw->data.dual_emac;
- ret = cpsw_run_xdp(priv, ch, &xdp, page, port);
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
- /* XDP prog might have changed packet data and boundaries */
- len = xdp.data_end - xdp.data;
headroom = xdp.data - xdp.data_hard_start;
/* XDP prog can modify vlan tag, so can't use encap header */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index cdc308a2aa3e..d828f856237a 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -1256,6 +1256,13 @@ static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
.major_ver_mask = 0x7,
.vlan_entry_tbl = vlan_entry_k3_cpswxg,
},
+ {
+ .dev_id = "am64-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ .tbl_entries = 512,
+ },
{ },
};
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 2f5e0ad23ad7..58a64313ac00 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -335,28 +335,20 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
+ int headroom = CPSW_HEADROOM, size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
- xdp.data = pa + CPSW_HEADROOM +
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- xdp.data_end = xdp.data + len -
- CPSW_RX_VLAN_ENCAP_HDR_SIZE;
- } else {
- xdp.data = pa + CPSW_HEADROOM;
- xdp.data_end = xdp.data + len;
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
}
- xdp_set_data_meta_invalid(&xdp);
-
- xdp.data_hard_start = pa;
- xdp.rxq = &priv->xdp_rxq[ch];
- xdp.frame_sz = PAGE_SIZE;
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
- ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port);
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
if (ret != CPSW_XDP_PASS)
goto requeue;
- /* XDP prog might have changed packet data and boundaries */
- len = xdp.data_end - xdp.data;
headroom = xdp.data - xdp.data_hard_start;
/* XDP prog can modify vlan tag, so can't use encap header */
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 99f44563e10f..bb59e768915e 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1323,7 +1323,7 @@ int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
}
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page, int port)
+ struct page *page, int port, int *len)
{
struct cpsw_common *cpsw = priv->cpsw;
struct net_device *ndev = priv->ndev;
@@ -1341,10 +1341,13 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
}
act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
switch (act) {
case XDP_PASS:
ret = CPSW_XDP_PASS;
- break;
+ goto out;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
@@ -1370,8 +1373,13 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
trace_xdp_exception(ndev, prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
goto drop;
}
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
out:
rcu_read_unlock();
return ret;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 7b7f3596b20d..a323bea54faa 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -438,7 +438,7 @@ int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
struct page *page, int port);
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
- struct page *page, int port);
+ struct page *page, int port, int *len);
irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id);
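
For readers tracking the cpsw/cpsw_new/cpsw_priv changes above: the open-coded xdp_buff setup is replaced by the xdp_init_buff()/xdp_prepare_buff() helpers, and cpsw_run_xdp() now reports the (possibly adjusted) frame length back through a pointer. Below is a minimal sketch of the resulting RX-path pattern, not part of the patch; the page, channel, port and length variables are illustrative stand-ins for the driver's own state.

	struct xdp_buff xdp;
	void *hard_start = page_address(page);	/* buffer start, including headroom */
	int headroom = CPSW_HEADROOM, size = len;

	/* frame size and rxq binding are set once, up front */
	xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
	/* derives data/data_end/data_hard_start; 'false' marks data_meta invalid */
	xdp_prepare_buff(&xdp, hard_start, headroom, size, false);

	ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
	/* on return, len already reflects any changes the XDP program made */
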
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
index 29747da5c514..a72bb570756f 100644
--- a/drivers/net/ethernet/ti/cpsw_switchdev.c
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -24,16 +24,12 @@ struct cpsw_switchdev_event_work {
unsigned long event;
};
-static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans, u8 state)
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv, u8 state)
{
struct cpsw_common *cpsw = priv->cpsw;
u8 cpsw_state;
int ret = 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
switch (state) {
case BR_STATE_FORWARDING:
cpsw_state = ALE_PORT_STATE_FORWARD;
@@ -60,32 +56,31 @@ static int cpsw_port_stp_state_set(struct cpsw_priv *priv,
}
static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
- struct switchdev_trans *trans,
struct net_device *orig_dev,
- unsigned long brport_flags)
+ struct switchdev_brport_flags flags)
{
struct cpsw_common *cpsw = priv->cpsw;
- bool unreg_mcast_add = false;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
- if (brport_flags & BR_MCAST_FLOOD)
- unreg_mcast_add = true;
- dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
- unreg_mcast_add, priv->emac_port);
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
- cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
- unreg_mcast_add);
+ dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, priv->emac_port);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
+ unreg_mcast_add);
+ }
return 0;
}
static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
- struct switchdev_trans *trans,
- unsigned long flags)
+ struct switchdev_brport_flags flags)
{
- if (flags & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
return -EINVAL;
return 0;
@@ -93,7 +88,7 @@ static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
static int cpsw_port_attr_set(struct net_device *ndev,
const struct switchdev_attr *attr,
- struct switchdev_trans *trans)
+ struct netlink_ext_ack *extack)
{
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
@@ -102,15 +97,15 @@ static int cpsw_port_attr_set(struct net_device *ndev,
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_pre_set(ndev, trans,
+ ret = cpsw_port_attr_br_flags_pre_set(ndev,
attr->u.brport_flags);
break;
case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
- ret = cpsw_port_stp_state_set(priv, trans, attr->u.stp_state);
+ ret = cpsw_port_stp_state_set(priv, attr->u.stp_state);
dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
break;
case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
- ret = cpsw_port_attr_br_flags_set(priv, trans, attr->orig_dev,
+ ret = cpsw_port_attr_br_flags_set(priv, attr->orig_dev,
attr->u.brport_flags);
break;
default:
@@ -253,56 +248,24 @@ static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
}
static int cpsw_port_vlans_add(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+ const struct switchdev_obj_port_vlan *vlan)
{
bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
struct net_device *orig_dev = vlan->obj.orig_dev;
bool cpu_port = netif_is_bridge_master(orig_dev);
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- u16 vid;
dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
- priv->ndev->name, vlan->vid_begin, vlan->flags);
+ priv->ndev->name, vlan->vid, vlan->flags);
if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
return 0;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_add(priv, untag, pvid, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static int cpsw_port_vlans_del(struct cpsw_priv *priv,
- const struct switchdev_obj_port_vlan *vlan)
-
-{
- struct net_device *orig_dev = vlan->obj.orig_dev;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int err;
-
- err = cpsw_port_vlan_del(priv, vid, orig_dev);
- if (err)
- return err;
- }
-
- return 0;
+ return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
}
static int cpsw_port_mdb_add(struct cpsw_priv *priv,
- struct switchdev_obj_port_mdb *mdb,
- struct switchdev_trans *trans)
+ struct switchdev_obj_port_mdb *mdb)
{
struct net_device *orig_dev = mdb->obj.orig_dev;
@@ -311,9 +274,6 @@ static int cpsw_port_mdb_add(struct cpsw_priv *priv,
int port_mask;
int err;
- if (switchdev_trans_ph_prepare(trans))
- return 0;
-
if (cpu_port)
port_mask = BIT(HOST_PORT_NUM);
else
@@ -352,7 +312,6 @@ static int cpsw_port_mdb_del(struct cpsw_priv *priv,
static int cpsw_port_obj_add(struct net_device *ndev,
const struct switchdev_obj *obj,
- struct switchdev_trans *trans,
struct netlink_ext_ack *extack)
{
struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
@@ -365,11 +324,11 @@ static int cpsw_port_obj_add(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_add(priv, vlan, trans);
+ err = cpsw_port_vlans_add(priv, vlan);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
- err = cpsw_port_mdb_add(priv, mdb, trans);
+ err = cpsw_port_mdb_add(priv, mdb);
break;
default:
err = -EOPNOTSUPP;
@@ -392,7 +351,7 @@ static int cpsw_port_obj_del(struct net_device *ndev,
switch (obj->id) {
case SWITCHDEV_OBJ_ID_PORT_VLAN:
- err = cpsw_port_vlans_del(priv, vlan);
+ err = cpsw_port_vlan_del(priv, vlan->vid, vlan->obj.orig_dev);
break;
case SWITCHDEV_OBJ_ID_PORT_MDB:
case SWITCHDEV_OBJ_ID_HOST_MDB:
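
The switchdev conversion above drops the prepare/commit transaction phases; bridge port flags now arrive as a struct switchdev_brport_flags carrying separate val and mask members. A hedged sketch of the resulting check-then-apply pattern follows; everything except the struct fields and the BR_* constants is an illustrative name.

	static int example_brport_flags_pre_set(struct switchdev_brport_flags flags)
	{
		/* refuse bits this switch cannot offload */
		if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
			return -EINVAL;
		return 0;
	}

	static void example_brport_flags_set(struct switchdev_brport_flags flags)
	{
		/* act only on bits the caller actually asked to change */
		if (flags.mask & BR_MCAST_FLOOD) {
			bool flood = flags.val & BR_MCAST_FLOOD;

			/* program unregistered-multicast flooding from 'flood' */
		}
	}
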
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 3d1fc8d2ca66..55e652624bd7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1100,7 +1100,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
return packets_done;
}
-/**
+/*
* gelic_card_interrupt - event handler for gelic_net
*/
static irqreturn_t gelic_card_interrupt(int irq, void *ptr)
@@ -1400,6 +1400,7 @@ out:
/**
* gelic_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
 * called if tx hangs. Schedules a task that resets the interface
*/
@@ -1431,6 +1432,7 @@ static const struct net_device_ops gelic_netdevice_ops = {
/**
* gelic_ether_setup_netdev_ops - initialization of net_device operations
* @netdev: net_device structure
+ * @napi: napi structure
*
* fills out function pointers in the net_device structure
*/
@@ -1632,7 +1634,7 @@ static void gelic_card_get_vlan_info(struct gelic_card *card)
dev_info(ctodev(card), "internal vlan %s\n",
card->vlan_required? "enabled" : "disabled");
}
-/**
+/*
* ps3_gelic_driver_probe - add a device to the control of this driver
*/
static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
@@ -1787,7 +1789,7 @@ fail_open:
return result;
}
-/**
+/*
* ps3_gelic_driver_remove - remove a device from the control of this driver
*/
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5f5b33e6653b..d5a75ef7e3ca 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -254,7 +254,7 @@ spider_net_set_promisc(struct spider_net_card *card)
/**
* spider_net_get_descr_status -- returns the status of a descriptor
- * @descr: descriptor to look at
+ * @hwdescr: descriptor to look at
*
* returns the status as in the dmac_cmd_status field of the descriptor
*/
@@ -542,6 +542,7 @@ error:
/**
* spider_net_get_multicast_hash - generates hash for multicast filter table
+ * @netdev: interface device structure
* @addr: multicast address
*
* returns the hash value.
@@ -890,7 +891,7 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/**
* spider_net_cleanup_tx_ring - cleans up the TX ring
- * @card: card structure
+ * @t: timer context used to obtain the pointer to net card data structure
*
* spider_net_cleanup_tx_ring is called by either the tx_timer
* or from the NAPI polling routine.
@@ -1063,6 +1064,7 @@ static void show_rx_chain(struct spider_net_card *card)
/**
* spider_net_resync_head_ptr - Advance head ptr past empty descrs
+ * @card: card structure
*
* If the driver fails to keep up and empty the queue, then the
 * hardware will run out of room to put incoming packets. This
@@ -1220,7 +1222,7 @@ bad_desc:
/**
* spider_net_poll - NAPI poll function called by the stack to return packets
- * @netdev: interface device structure
+ * @napi: napi device structure
* @budget: number of packets we can pass to the stack at most
*
 * returns 0 if no more packets are available to the driver/stack. Returns 1,
@@ -1268,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
/**
* spider_net_set_mac - sets the MAC of an interface
* @netdev: interface device structure
- * @ptr: pointer to new MAC address
+ * @p: pointer to new MAC address
*
* Returns 0 on success, <0 on failure. Currently, we don't support this
* and will always return EOPNOTSUPP.
@@ -1340,6 +1342,8 @@ spider_net_link_reset(struct net_device *netdev)
* spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure
* @status_reg: interrupt status register 0 (GHIINT0STS)
+ * @error_reg1: interrupt status register 1 (GHIINT1STS)
+ * @error_reg2: interrupt status register 2 (GHIINT2STS)
*
* spider_net_handle_error_irq treats or ignores all error conditions
* found when an interrupt is presented
@@ -1961,8 +1965,7 @@ init_firmware_failed:
/**
* spider_net_link_phy
- * @data: used for pointer to card structure
- *
+ * @t: timer context used to obtain the pointer to net card data structure
*/
static void spider_net_link_phy(struct timer_list *t)
{
@@ -2140,7 +2143,7 @@ spider_net_stop(struct net_device *netdev)
/**
* spider_net_tx_timeout_task - task scheduled by the watchdog timeout
* function (to be called not under interrupt status)
- * @data: data, is interface device structure
+ * @work: work context used to obtain the pointer to net card data structure
*
* called as task when tx hangs, resets interface (if interface is up)
*/
@@ -2174,6 +2177,7 @@ out:
/**
* spider_net_tx_timeout - called when the tx timeout watchdog kicks in.
* @netdev: interface device structure
+ * @txqueue: unused
*
 * called if tx hangs. Schedules a task that resets the interface
*/
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 3b2137d1f4c6..c6eb7f2368aa 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -18,7 +18,6 @@ if NET_VENDOR_XILINX
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
- depends on PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || COMPILE_TEST
select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index a03c3ca1b28d..1e966a39967e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -339,6 +339,10 @@
#define DELAY_OF_ONE_MILLISEC 1000
+/* Xilinx PCS/PMA PHY register for switching 1000BaseX or SGMII */
+#define XLNX_MII_STD_SELECT_REG 0x11
+#define XLNX_MII_STD_SELECT_SGMII BIT(0)
+
/**
* struct axidma_bd - Axi Dma buffer descriptor layout
* @next: MM2S/S2MM Next Descriptor Pointer
@@ -377,22 +381,29 @@ struct axidma_bd {
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
* @phy_node: Pointer to device node structure
+ * @phylink: Pointer to phylink instance
+ * @phylink_config: phylink configuration settings
+ * @pcs_phy: Reference to PCS/PMA PHY if used
+ * @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
+ * @clk: Clock for AXI bus
* @mii_bus: Pointer to MII bus structure
* @mii_clk_div: MII bus clock divider value
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
- * @dma_err_tasklet: Tasklet structure to process Axi DMA errors
+ * @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
+ * @eth_irq: Ethernet core IRQ number
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
- * @last_link: Phy link state in which the PHY was negotiated earlier
* @features: Stores the extended features supported by the axienet hw
* @tx_bd_v: Virtual address of the TX buffer descriptor ring
* @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
+ * @tx_bd_num: Size of TX buffer descriptor ring
* @rx_bd_v: Virtual address of the RX buffer descriptor ring
* @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
+ * @rx_bd_num: Size of RX buffer descriptor ring
* @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
* accessed currently. Used while alloc. BDs before a TX starts
* @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
@@ -414,23 +425,20 @@ struct axienet_local {
struct net_device *ndev;
struct device *dev;
- /* Connection to PHY device */
struct device_node *phy_node;
struct phylink *phylink;
struct phylink_config phylink_config;
- /* Reference to PCS/PMA PHY if used */
struct mdio_device *pcs_phy;
- /* Clock for AXI bus */
+ bool switch_x_sgmii;
+
struct clk *clk;
- /* MDIO bus data */
- struct mii_bus *mii_bus; /* MII bus reference */
- u8 mii_clk_div; /* MII bus clock divider value */
+ struct mii_bus *mii_bus;
+ u8 mii_clk_div;
- /* IO registers, dma functions and IRQs */
resource_size_t regs_start;
void __iomem *regs;
void __iomem *dma_regs;
@@ -442,10 +450,9 @@ struct axienet_local {
int eth_irq;
phy_interface_t phy_mode;
- u32 options; /* Current options word */
+ u32 options;
u32 features;
- /* Buffer descriptors */
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
u32 tx_bd_num;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 6fea980acf64..3a8775e0ca55 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1469,6 +1469,13 @@ axienet_ethtools_set_link_ksettings(struct net_device *ndev,
return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}
+static int axienet_ethtools_nway_reset(struct net_device *dev)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+
+ return phylink_ethtool_nway_reset(lp->phylink);
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
.get_drvinfo = axienet_ethtools_get_drvinfo,
@@ -1483,6 +1490,7 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.set_coalesce = axienet_ethtools_set_coalesce,
.get_link_ksettings = axienet_ethtools_get_link_ksettings,
.set_link_ksettings = axienet_ethtools_set_link_ksettings,
+ .nway_reset = axienet_ethtools_nway_reset,
};
static void axienet_validate(struct phylink_config *config,
@@ -1494,13 +1502,22 @@ static void axienet_validate(struct phylink_config *config,
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
/* Only support the mode we are configured for */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ if (lp->switch_x_sgmii)
+ break;
+ fallthrough;
+ default:
+ if (state->interface != lp->phy_mode) {
+ netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
+ phy_modes(state->interface),
+ phy_modes(lp->phy_mode));
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
}
phylink_set(mask, Autoneg);
@@ -1560,6 +1577,33 @@ static void axienet_mac_an_restart(struct phylink_config *config)
phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
}
+static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t iface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ int ret;
+
+ switch (iface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ if (!lp->switch_x_sgmii)
+ return 0;
+
+ ret = mdiobus_write(lp->pcs_phy->bus,
+ lp->pcs_phy->addr,
+ XLNX_MII_STD_SELECT_REG,
+ iface == PHY_INTERFACE_MODE_SGMII ?
+ XLNX_MII_STD_SELECT_SGMII : 0);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to switch PHY interface: %d\n",
+ ret);
+ return ret;
+ default:
+ return 0;
+ }
+}
+
static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -1637,6 +1681,7 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
.validate = axienet_validate,
.mac_pcs_get_state = axienet_mac_pcs_get_state,
.mac_an_restart = axienet_mac_an_restart,
+ .mac_prepare = axienet_mac_prepare,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
.mac_link_up = axienet_mac_link_up,
@@ -1817,6 +1862,18 @@ static int axienet_probe(struct platform_device *pdev)
lp->options = XAE_OPTION_DEFAULTS;
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+
+ lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(lp->clk)) {
+ ret = PTR_ERR(lp->clk);
+ goto free_netdev;
+ }
+ ret = clk_prepare_enable(lp->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
+ goto free_netdev;
+ }
+
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
@@ -1876,6 +1933,9 @@ static int axienet_probe(struct platform_device *pdev)
*/
of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
+ lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
+ "xlnx,switch-x-sgmii");
+
/* Start with the proprietary, and broken phy_type */
ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
if (!ret) {
@@ -1905,6 +1965,12 @@ static int axienet_probe(struct platform_device *pdev)
if (ret)
goto free_netdev;
}
+ if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
+ dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
+ ret = -EINVAL;
+ goto free_netdev;
+ }
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
@@ -1992,20 +2058,6 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
- lp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(lp->clk)) {
- dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
- PTR_ERR(lp->clk));
- lp->clk = NULL;
- } else {
- ret = clk_prepare_enable(lp->clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable clock: %d\n",
- ret);
- goto free_netdev;
- }
- }
-
ret = axienet_mdio_setup(lp);
if (ret)
dev_warn(&pdev->dev,
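
A note on the axienet clock rework above: devm_clk_get_optional() returns NULL, not an error, when the device tree names no clock, and clk_prepare_enable(NULL) is a no-op that returns 0, which is why the old dev_warn()-and-continue fallback can simply be dropped. A minimal sketch of the pattern, assuming the same lp/pdev names the driver uses (error unwinding simplified to plain returns):

	lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(lp->clk))
		return PTR_ERR(lp->clk);	/* e.g. -EPROBE_DEFER */

	/* safe even when lp->clk is NULL (no clock described) */
	ret = clk_prepare_enable(lp->clk);
	if (ret)
		return ret;
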
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 008b9a40faad..007840d4a807 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1193,8 +1193,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
dev_info(dev,
- "Xilinx EmacLite at 0x%08X mapped to 0x%08lX, irq=%d\n",
- (unsigned int __force)ndev->mem_start,
+ "Xilinx EmacLite at 0x%08lX mapped to 0x%08lX, irq=%d\n",
+ (unsigned long __force)ndev->mem_start,
(unsigned long __force)lp->base_addr, ndev->irq);
return 0;
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 2e5202923510..0152f1e70783 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -247,7 +247,7 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
}
#endif
-static spinlock_t mdio_lock;
+static DEFINE_SPINLOCK(mdio_lock);
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
@@ -528,7 +528,6 @@ static int ixp4xx_mdio_register(struct eth_regs __iomem *regs)
mdio_regs = regs;
__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
- spin_lock_init(&mdio_lock);
mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
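
On the ixp4xx change above: DEFINE_SPINLOCK() declares and statically initializes the lock in one step, so the runtime spin_lock_init() call in the MDIO registration path becomes redundant. A small illustrative sketch; the function and lock names here are hypothetical.

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_mdio_lock);	/* ready at compile time */

	static void example_mdio_access(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&example_mdio_lock, flags);
		/* ... touch the shared MDIO command/status registers ... */
		spin_unlock_irqrestore(&example_mdio_lock, flags);
	}
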